Bluetooth: Handle Adv set terminated event
[linux-2.6-block.git] / net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}
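
/* Usage sketch (illustrative only, not part of the original file):
 * a caller builds a request on the stack, queues one or more commands
 * and then hands the whole batch to the controller. hci_req_purge()
 * is only needed if the request is abandoned before a successful
 * hci_req_run().
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	if (hci_req_run(&req, NULL) < 0)
 *		hci_req_purge(&req);
 */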

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
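
/* Usage sketch (illustrative only): the synchronous helpers block until
 * the matching command complete/status event arrives and return its
 * parameters as an skb, so they must be called from process context,
 * typically with the request lock (hci_req_sync_lock()) held. For
 * example, reading the local version information:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */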

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
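
/* Usage sketch (illustrative only): hci_req_sync() runs a builder
 * callback under the request lock and waits for the result. The
 * builder only queues commands; finishing with an empty queue is
 * treated as success (see the -ENODATA handling above).
 *
 *	static int read_voice_setting(struct hci_request *req,
 *				      unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, read_voice_setting, 0, HCI_CMD_TIMEOUT,
 *			   NULL);
 */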

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
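
/* Wire-format note (informational): the returned skb holds a standard
 * HCI command packet, a 3 byte header followed by plen parameter
 * bytes. For HCI_OP_READ_LOCAL_VERSION (OGF 0x04, OCF 0x0001):
 *
 *	opcode = (OGF << 10) | OCF = 0x1001	-> on the wire: 01 10
 *	plen   = 0x00
 *
 * The packet type byte (HCI_COMMAND_PKT, 0x01) is not part of the skb
 * data; it is carried in hci_skb_pkt_type() and prepended later by the
 * transport driver.
 */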

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
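
/* Unit note (informational): page scan interval and window are in
 * units of 0.625 ms, so the constants above work out as:
 *
 *	0x0100 * 0.625 ms =  160 ms	(fast connectable interval)
 *	0x0800 * 0.625 ms = 1280 ms	(default 1.28 s interval)
 *	0x0012 * 0.625 ms = 11.25 ms	(window in both modes)
 */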

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
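
/* Layout note (informational): every EIR field written above is a
 * length/type/value triplet where the length byte counts the type byte
 * plus the value. A complete local name "BlueZ" is thus encoded as:
 *
 *	06 09 42 6c 75 65 5a
 *	|  |  'B  l  u  e  Z'
 *	|  EIR_NAME_COMPLETE (0x09)
 *	length = 1 + strlen("BlueZ")
 */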

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
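
/* Usage note (informational): the value returned here feeds straight
 * into the scan parameters' filter_policy field:
 *
 *	0x00 - accept all advertisements
 *	0x01 - accept only advertisements from white list devices
 *
 * Falling back to 0x00 whenever an RPA is involved is deliberate: the
 * controller's white list matches identity addresses and cannot follow
 * resolvable private addresses.
 */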

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the controller supports both the
	 * extended scan parameters and extended scan enable commands.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}
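
/* Unit note (informational): le_scan_interval and le_scan_window are
 * in units of 0.625 ms. With the usual defaults of 0x0060 and 0x0030
 * the controller scans for 30 ms out of every 60 ms:
 *
 *	0x0060 * 0.625 ms = 60 ms	(interval)
 *	0x0030 * 0.625 ms = 30 ms	(window)
 */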

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		/* Disable all sets since we only support one set at the moment */
		cp.num_of_sets = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
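
/* Mapping note (informational): the "bit N" references above index the
 * LE supported states mask reported by the controller, where bit N is
 * generally found at le_states[N / 8] & (1 << (N % 8)). For example,
 * bit 20 maps to le_states[2] & 0x10 and bit 38 to le_states[4] & 0x40,
 * matching the checks in is_advertising_allowed() above.
 */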

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
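
/* Example (informational): the NRPA loop above clears the two most
 * significant bits of the most significant address byte, which in the
 * little-endian bdaddr_t storage is b[5]:
 *
 *	get_random_bytes(&nrpa, 6);
 *	nrpa.b[5] &= 0x3f;	-> top two bits 00 = non-resolvable
 *
 * (For comparison, an RPA carries 01 in those bits and a static random
 * address 11.)
 */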

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	/* In ext adv set param interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	if (connectable)
		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	else
		cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = 0;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

void __hci_req_enable_ext_advertising(struct hci_request *req)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];

	cp = (void *)data;
	adv_set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}
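
/* Timing note (informational): adv_instance->timeout, duration and the
 * computed timeout above are all in seconds, hence the
 * msecs_to_jiffies(timeout * 1000) conversion. An instance added with
 * a 60 second timeout and a 10 second duration would be rescheduled
 * here up to six times, with remaining_time shrinking by the duration
 * on each pass until hci_req_clear_adv_instance() drops it.
 */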

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
1820
1821int hci_update_random_address(struct hci_request *req, bool require_privacy,
82a37ade 1822 bool use_rpa, u8 *own_addr_type)
0857dd3b
JH
1823{
1824 struct hci_dev *hdev = req->hdev;
1825 int err;
1826
1827 /* If privacy is enabled use a resolvable private address. If
1828 * current RPA has expired or there is something else than
1829 * the current RPA in use, then generate a new one.
1830 */
82a37ade 1831 if (use_rpa) {
0857dd3b
JH
1832 int to;
1833
1834 *own_addr_type = ADDR_LE_DEV_RANDOM;
1835
a69d8927 1836 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
0857dd3b
JH
1837 !bacmp(&hdev->random_addr, &hdev->rpa))
1838 return 0;
1839
1840 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1841 if (err < 0) {
2064ee33 1842 bt_dev_err(hdev, "failed to generate new RPA");
0857dd3b
JH
1843 return err;
1844 }
1845
1846 set_random_addr(req, &hdev->rpa);
1847
1848 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1849 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1850
1851 return 0;
1852 }
1853
1854 /* In case of required privacy without resolvable private address,
1855 * use an non-resolvable private address. This is useful for active
1856 * scanning and non-connectable advertising.
1857 */
1858 if (require_privacy) {
1859 bdaddr_t nrpa;
1860
1861 while (true) {
1862 /* The non-resolvable private address is generated
1863 * from random six bytes with the two most significant
1864 * bits cleared.
1865 */
1866 get_random_bytes(&nrpa, 6);
1867 nrpa.b[5] &= 0x3f;
1868
1869 /* The non-resolvable private address shall not be
1870 * equal to the public address.
1871 */
1872 if (bacmp(&hdev->bdaddr, &nrpa))
1873 break;
1874 }
1875
1876 *own_addr_type = ADDR_LE_DEV_RANDOM;
1877 set_random_addr(req, &nrpa);
1878 return 0;
1879 }

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so fall
	 * back to the public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
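
/* A minimal usage sketch, not part of the original file: the request
 * builders below (e.g. active_scan()) consume the helper above in
 * exactly this pattern. The function name here is hypothetical and
 * only illustrates the calling convention.
 */
static void example_pick_own_addr(struct hci_request *req)
{
	u8 own_addr_type;

	/* Require privacy and prefer an RPA; fall back to the public
	 * address when no usable random address can be programmed. A
	 * real caller would then feed own_addr_type into the scan or
	 * advertising parameters it queues on the request.
	 */
	if (hci_update_random_address(req, true, true, &own_addr_type) < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;
}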

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
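
/* Added note: the written value maps directly onto the HCI Write Scan
 * Enable parameter, where SCAN_INQUIRY (bit 0) enables inquiry scan
 * and SCAN_PAGE (bit 1) enables page scan; the early return above
 * skips the command when both bits already match the
 * HCI_ISCAN/HCI_PSCAN device flags.
 */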

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances)) {
		if (ext_adv_capable(hdev))
			__hci_req_start_ext_adv(req, hdev->cur_adv_instance);
		else
			__hci_req_enable_advertising(req);
	}

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
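
/* Added note: the Class of Device is transmitted little-endian, so
 * cod[0] carries the minor class, cod[1] the major class and cod[2]
 * the service-class bits OR-ed together from each UUID's svc_hint.
 * Setting 0x20 in cod[1] raises CoD bit 13, the Limited Discoverable
 * Mode service-class bit from the Bluetooth Assigned Numbers.
 */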

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
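
/* Added note: each LAP is sent little-endian, so the byte triplets
 * above encode the Limited Inquiry Access Code 0x9e8b00 and the
 * General Inquiry Access Code 0x9e8b33, and the command length is
 * one byte for num_iac plus three bytes per LAP.
 */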

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
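
/* Illustrative call, an assumption about callers rather than part of
 * this file:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * lets __hci_abort_conn() pick the HCI command matching the current
 * connection state. The -ENODATA case is deliberately tolerated above
 * since it only means the request ended up empty.
 */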

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
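
/* Added note: per the HCI Inquiry command definition, cp.length is
 * expressed in units of 1.28 seconds, so the DISCOV_*_INQUIRY_LEN
 * constants passed in through opt bound how long the controller keeps
 * inquiring before reporting Inquiry Complete.
 */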

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery
	 * state. If we were running both LE and BR/EDR inquiry
	 * simultaneously, and BR/EDR inquiry is already finished, stop
	 * discovery; otherwise BR/EDR inquiry will stop discovery when
	 * it finishes. If a remote device name is still being
	 * resolved, do not change the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
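
/* Added note: the restart is built as disable-then-enable within a
 * single request, and use_ext_scan() picks the extended variant of
 * the scan-enable command on controllers that expose the LE extended
 * advertising feature set; the scan parameters themselves are left as
 * previously programmed.
 */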

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued
	 * to run at scan_start + duration. During the scan restart
	 * this job has been canceled, so it needs to be queued again
	 * with the proper remaining timeout, to make sure that the
	 * scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
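
	/* Added note with a worked example: when jiffies wraps after
	 * the scan starts (now < scan_start), the elapsed time is
	 * reconstructed across the wrap. With scan_start = ULONG_MAX - 100
	 * and now = 50 this computes elapsed = 100 + 50 = 150 ticks,
	 * keeping the remaining timeout sensible despite the overflow.
	 */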

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If the controller is scanning, background scanning is
	 * running. Thus, temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable
	 * private address (when the privacy feature has been enabled)
	 * or a non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning
		 * time should occupy the whole discovery time since
		 * BR/EDR inquiry and LE scanning are scheduled by the
		 * controller.
		 *
		 * Interleaved discovery, in comparison, runs BR/EDR
		 * inquiry and LE scanning sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the
			 * LE scan interval. We must leave some time for
			 * the controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
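
/* Added summary: the return value is true when there was anything to
 * stop (an active inquiry or LE scan, a passive scan, or a pending
 * remote-name resolution), so callers know whether queued commands
 * remain to complete before discovery can be treated as stopped.
 */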

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}