Bluetooth: Let controller creates RPA during le create conn
[linux-2.6-block.git] net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
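
/* Illustrative usage sketch (not part of the original file): build a
 * request, queue one or more commands on it and submit the whole batch
 * asynchronously. The completion callback fires once the controller has
 * answered the last queued command.
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	if (hci_req_run(&req, complete_cb) < 0)
 *		// handle error: nothing was queued to the controller
 */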

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
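
/* Illustrative sketch (assumed call site, not from this file): issue a
 * single command synchronously and consume the returned event parameters.
 * HCI_OP_READ_LOCAL_VERSION and HCI_INIT_TIMEOUT are existing kernel
 * constants.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// parse skb->data, then release the reference
 *	kfree_skb(skb);
 */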

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
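
/* Illustrative sketch (hypothetical builder, not from this file): the
 * function passed to hci_req_sync() only queues commands; hci_req_sync()
 * serializes against other requests, runs the queue and blocks until
 * completion or timeout.
 *
 *	static int write_scan_enable(struct hci_request *req,
 *				     unsigned long opt)
 *	{
 *		u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, write_scan_enable, SCAN_PAGE,
 *			   HCI_CMD_TIMEOUT, &status);
 */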

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
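
/* The buffer built above follows the HCI command packet layout from the
 * Bluetooth Core specification: a little-endian 16-bit opcode (the
 * OGF/OCF pair), one parameter-length byte, then plen parameter bytes.
 *
 *	+---------------+------+------------------+
 *	| opcode (le16) | plen | param[0..plen-1] |
 *	+---------------+------+------------------+
 */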

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
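
/* Note: when a non-zero event is given to hci_req_add_ev(), the request
 * machinery treats that HCI event (instead of the usual Command Complete
 * or Command Status) as the terminating event for the command; this is
 * what __hci_cmd_sync_ev() relies on.
 */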

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
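
/* Worked example for the fast-connectable interval above: page scan
 * timing is expressed in 0.625 ms baseband slots, so 0x0100 = 256 slots
 * and 256 * 0.625 ms = 160 ms.
 */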

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	BT_DBG("%s ADV monitoring is %s", hdev->name,
	       hci_is_adv_monitoring(hdev) ? "on" : "off");

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports) &&
	    !hci_is_adv_monitoring(hdev)) {
		/* If there are no pending LE connections, no devices to
		 * be scanned for and no ADV monitors, we should stop the
		 * background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
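
/* For reference, the EIR data produced above is a sequence of
 * length-prefixed structures, where each length byte counts the type
 * byte plus the payload:
 *
 *	+-----+------+----------------+-----+------+-----
 *	| len | type | data[0..len-2] | len | type | ...
 *	+-----+------+----------------+-----+------+-----
 *
 * e.g. the complete local name "BlueZ" is encoded as
 * 06 09 'B' 'l' 'u' 'e' 'Z' (len 0x06 = type byte + 5 name bytes,
 * type 0x09 = EIR_NAME_COMPLETE).
 */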

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
				u8 bdaddr_type)
{
	struct hci_cp_le_del_from_white_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
			     struct hci_conn_params *params, u8 *num_entries,
			     bool allow_rpa)
{
	struct hci_cp_le_add_to_white_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in white list */
	if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_white_list_size)
		return -1;

	/* White list can not be used with RPAs */
	if (!allow_rpa && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in whitelist */
	if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
						   params->current_flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow whitelisting even with RPAs in suspend. In the worst case,
	 * we won't be able to wake from devices that use the Privacy 1.2
	 * features. Additionally, once we support Privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the whitelist.
		 */
		if (!pend_conn && !pend_report) {
			del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* White list can not be used with RPAs */
		if (!allow_rpa && !use_ll_privacy(hdev) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_white_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Once controller offloading of advertisement monitors is in place,
	 * this condition should also check for support of the MSFT
	 * extension. If suspend is ongoing, the whitelist should be the
	 * default to prevent waking by random advertisements.
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended)
		return 0x00;

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
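
/* The scan interval and window above are in units of 0.625 ms, e.g. an
 * interval of 0x0060 (96 slots) corresponds to 96 * 0.625 ms = 60 ms.
 */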

/* Call hci_req_add_le_scan_disable() first to disable the controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, addr_resolv);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
	struct hci_cp_set_event_filter f;

	memset(&f, 0, sizeof(f));
	f.flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

	/* Update page scan state (since we may have modified it when setting
	 * the event filter).
	 */
	__hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
	struct bdaddr_list_with_flags *b;
	struct hci_cp_set_event_filter f;
	struct hci_dev *hdev = req->hdev;
	u8 scan = SCAN_DISABLED;

	/* Always clear event filter when starting */
	hci_req_clear_event_filter(req);

	list_for_each_entry(b, &hdev->whitelist, list) {
		if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
					b->current_flags))
			continue;

		memset(&f, 0, sizeof(f));
		bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
		f.flt_type = HCI_FLT_CONN_SETUP;
		f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
		f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
		hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
		scan = SCAN_PAGE;
	}

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
	/* Before changing params disable scan if enabled */
	if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* Configure params and enable scanning */
	hci_req_add_le_passive_scan(req);

	/* Block suspend notifier on response */
	set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
		   status);
	if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
	    test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
		wake_up(&hdev->suspend_wait_q);
	}
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
	int old_state;
	struct hci_conn *conn;
	struct hci_request req;
	u8 page_scan;
	int disconnect_counter;

	if (next == hdev->suspend_state) {
		bt_dev_dbg(hdev, "Same state before and after: %d", next);
		goto done;
	}

	hdev->suspend_state = next;
	hci_req_init(&req, hdev);

	if (next == BT_SUSPEND_DISCONNECT) {
		/* Mark device as suspended */
		hdev->suspended = true;

		/* Pause discovery if not already stopped */
		old_state = hdev->discovery.state;
		if (old_state != DISCOVERY_STOPPED) {
			set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hdev->discovery_paused = true;
		hdev->discovery_old_state = old_state;

		/* Stop advertising */
		old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
		if (old_state) {
			set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
			cancel_delayed_work(&hdev->discov_off);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, 0);
		}

		hdev->advertising_paused = true;
		hdev->advertising_old_state = old_state;
		/* Disable page scan */
		page_scan = SCAN_DISABLED;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

		/* Disable LE passive scan if enabled */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(&req);

		/* Mark task needing completion */
		set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

		/* Prevent disconnects from causing scanning to be re-enabled */
		hdev->scanning_paused = true;

		/* Run commands before disconnecting */
		hci_req_run(&req, suspend_req_complete);

		disconnect_counter = 0;
		/* Soft disconnect everything (power off) */
		list_for_each_entry(conn, &hdev->conn_hash.list, list) {
			hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
			disconnect_counter++;
		}

		if (disconnect_counter > 0) {
			bt_dev_dbg(hdev,
				   "Had %d disconnects. Will wait on them",
				   disconnect_counter);
			set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
		}
	} else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
		/* Unpause to take care of updating scanning params */
		hdev->scanning_paused = false;
		/* Enable event filter for paired devices */
		hci_req_set_event_filter(&req);
		/* Enable passive scan at lower duty cycle */
		hci_req_config_le_suspend_scan(&req);
		/* Pause scan changes again. */
		hdev->scanning_paused = true;
		hci_req_run(&req, suspend_req_complete);
	} else {
		hdev->suspended = false;
		hdev->scanning_paused = false;

		hci_req_clear_event_filter(&req);
		/* Reset passive/background scanning to normal */
		hci_req_config_le_suspend_scan(&req);

		/* Unpause advertising */
		hdev->advertising_paused = false;
		if (hdev->advertising_old_state) {
			set_bit(SUSPEND_UNPAUSE_ADVERTISING,
				hdev->suspend_tasks);
			hci_dev_set_flag(hdev, HCI_ADVERTISING);
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);
			hdev->advertising_old_state = 0;
		}

		/* Unpause discovery */
		hdev->discovery_paused = false;
		if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
		    hdev->discovery_old_state != DISCOVERY_STOPPING) {
			set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
			hci_discovery_set_state(hdev, DISCOVERY_STARTING);
			queue_work(hdev->req_workqueue, &hdev->discov_update);
		}

		hci_req_run(&req, suspend_req_complete);
	}

	hdev->suspend_state = next;

done:
	clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
	wake_up(&hdev->suspend_wait_q);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Instance 0x00 always sets the local name */
	if (instance == 0x00)
		return 1;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}
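
/* Worked example for the le_states bit checks above: the LE supported
 * states value is a little-endian bit field, so state bit N lives in
 * le_states[N / 8] under mask (1 << (N % 8)); bit 20 is therefore
 * le_states[2] & 0x10 and bit 38 is le_states[4] & 0x40.
 */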

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (connectable) {
		cp.type = LE_ADV_IND;

		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	} else {
		if (get_cur_adv_instance_scan_rsp_len(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		} else {
			adv_min_interval = hdev->le_adv_min_interval;
			adv_max_interval = hdev->le_adv_max_interval;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}
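
/* Example (assuming HCI_MAX_SHORT_NAME_LENGTH is 10, its value in the
 * current tree): a 30 character device name does not fit, so the helper
 * above falls back to an EIR_NAME_SHORT structure carrying the first 10
 * bytes of the name plus a NUL terminator.
 */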

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = instance;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = instance;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (!adv_instance->rpa_expired &&
			    !bacmp(&adv_instance->random_addr, &hdev->rpa))
				return 0;

			adv_instance->rpa_expired = false;
		} else {
			if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
			    !bacmp(&hdev->random_addr, &hdev->rpa))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		if (adv_instance)
			queue_delayed_work(hdev->workqueue,
					   &adv_instance->rpa_expired_cb, to);
		else
			queue_delayed_work(hdev->workqueue,
					   &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
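
/* For reference (Bluetooth Core spec, Vol 6, Part B, Sec. 1.3.2): the
 * two most significant bits of a random address encode its sub-type:
 *
 *	00 - non-resolvable private address
 *	01 - resolvable private address
 *	11 - static random address
 *
 * which is why the NRPA generation above clears the top two bits of
 * b[5] with the 0x3f mask.
 */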

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
1846
a0fb3726 1847int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
de181e88
JK
1848{
1849 struct hci_cp_le_set_ext_adv_params cp;
1850 struct hci_dev *hdev = req->hdev;
1851 bool connectable;
1852 u32 flags;
a73c046a
JK
1853 bdaddr_t random_addr;
1854 u8 own_addr_type;
1855 int err;
1856 struct adv_info *adv_instance;
1857 	bool secondary_adv;
1858
1859 if (instance > 0) {
1860 adv_instance = hci_find_adv_instance(hdev, instance);
1861 if (!adv_instance)
1862 return -EINVAL;
1863 } else {
1864 adv_instance = NULL;
1865 }
1866
1867 flags = get_adv_instance_flags(hdev, instance);
1868
1869 /* If the "connectable" instance flag was not set, then choose between
1870 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1871 */
1872 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1873 mgmt_get_connectable(hdev);
1874
1875 	if (!is_advertising_allowed(hdev, connectable))
1876 return -EPERM;
1877
1878 /* Set require_privacy to true only when non-connectable
1879 * advertising is used. In that case it is fine to use a
1880 * non-resolvable private address.
1881 */
1882 err = hci_get_random_address(hdev, !connectable,
1883 adv_use_rpa(hdev, flags), adv_instance,
1884 &own_addr_type, &random_addr);
1885 if (err < 0)
1886 return err;
1887
1888 memset(&cp, 0, sizeof(cp));
1889
1890 	/* In the ext adv set param command the interval is 3 octets */
1891 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1892 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
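	/* e.g. the interval value 0x0800 (2048 * 0.625 ms = 1.28 s) is
	 * written out as the little-endian octets 00 08 00.
	 */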
1893
1894 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1895
1896 if (connectable) {
1897 if (secondary_adv)
1898 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1899 else
1900 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1901 } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1902 if (secondary_adv)
1903 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1904 else
1905 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1906 } else {
1907 if (secondary_adv)
1908 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1909 else
1910 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1911 }
1912
1913 	cp.own_addr_type = own_addr_type;
1914 cp.channel_map = hdev->le_adv_channel_map;
1915 cp.tx_power = 127;
1916 	cp.handle = instance;
1917
1918 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1919 cp.primary_phy = HCI_ADV_PHY_1M;
1920 cp.secondary_phy = HCI_ADV_PHY_2M;
1921 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1922 cp.primary_phy = HCI_ADV_PHY_CODED;
1923 cp.secondary_phy = HCI_ADV_PHY_CODED;
1924 } else {
1925 /* In all other cases use 1M */
1926 cp.primary_phy = HCI_ADV_PHY_1M;
1927 cp.secondary_phy = HCI_ADV_PHY_1M;
1928 }
1929
1930 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1931
1932 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1933 bacmp(&random_addr, BDADDR_ANY)) {
1934 struct hci_cp_le_set_adv_set_rand_addr cp;
1935
1936 		/* Check if the random address needs to be updated */
1937 if (adv_instance) {
1938 if (!bacmp(&random_addr, &adv_instance->random_addr))
1939 return 0;
1940 } else {
1941 if (!bacmp(&random_addr, &hdev->random_addr))
1942 return 0;
1943 }
1944
1945 memset(&cp, 0, sizeof(cp));
1946
1947 		cp.handle = instance;
1948 bacpy(&cp.bdaddr, &random_addr);
1949
1950 hci_req_add(req,
1951 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1952 sizeof(cp), &cp);
1953 }
1954
1955 return 0;
1956}
1957
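/* Editor's note: an illustrative sketch, not part of the original file,
 * showing how the setup helper above slots into the request pattern used
 * throughout this file: queue the commands, then run the request.
 */
#if 0
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	err = __hci_req_setup_ext_adv_instance(&req, 0x01);
	if (!err)
		err = hci_req_run(&req, NULL);
#endif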
1958int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1959{
1960 	struct hci_dev *hdev = req->hdev;
1961 struct hci_cp_le_set_ext_adv_enable *cp;
1962 struct hci_cp_ext_adv_set *adv_set;
1963 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1964 struct adv_info *adv_instance;
1965
1966 if (instance > 0) {
1967 adv_instance = hci_find_adv_instance(hdev, instance);
1968 if (!adv_instance)
1969 return -EINVAL;
1970 } else {
1971 adv_instance = NULL;
1972 }
1973
1974 cp = (void *) data;
1975 adv_set = (void *) cp->data;
1976
1977 memset(cp, 0, sizeof(*cp));
1978
1979 cp->enable = 0x01;
1980 cp->num_of_sets = 0x01;
1981
1982 memset(adv_set, 0, sizeof(*adv_set));
1983
1984 adv_set->handle = instance;
1985
1986 /* Set duration per instance since controller is responsible for
1987 * scheduling it.
1988 */
1989 	if (adv_instance && adv_instance->timeout) {
1990 		u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1991
1992 /* Time = N * 10 ms */
1993 adv_set->duration = cpu_to_le16(duration / 10);
1994 }
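	/* e.g. a 5 second timeout yields 5000 ms and therefore a
	 * controller duration of N = 500 (N * 10 ms).
	 */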
1995
1996 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1997 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1998 data);
1999
2000 return 0;
2001}
2002
2003int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
2004{
2005 struct hci_dev *hdev = req->hdev;
2006 struct hci_cp_le_set_ext_adv_enable *cp;
2007 struct hci_cp_ext_adv_set *adv_set;
2008 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
2009 u8 req_size;
2010
2011 /* If request specifies an instance that doesn't exist, fail */
2012 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2013 return -EINVAL;
2014
2015 memset(data, 0, sizeof(data));
2016
2017 cp = (void *)data;
2018 adv_set = (void *)cp->data;
2019
2020 /* Instance 0x00 indicates all advertising instances will be disabled */
2021 cp->num_of_sets = !!instance;
2022 cp->enable = 0x00;
2023
2024 adv_set->handle = instance;
2025
2026 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
2027 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
2028
2029 return 0;
2030}
2031
2032int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
2033{
2034 struct hci_dev *hdev = req->hdev;
2035
2036 /* If request specifies an instance that doesn't exist, fail */
2037 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
2038 return -EINVAL;
2039
2040 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
2041
2042 return 0;
2043}
2044
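/* Editor's note: illustrative only, not part of the original file. A
 * typical teardown of a single set pairs the two helpers above in one
 * request: disable the set, then remove it from the controller.
 */
#if 0
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_disable_ext_adv_instance(&req, 0x01);
	__hci_req_remove_ext_adv_instance(&req, 0x01);
	hci_req_run(&req, NULL);
#endif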
2045int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
2046{
2047 	struct hci_dev *hdev = req->hdev;
2048 	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
2049 int err;
2050
2051 /* If instance isn't pending, the chip knows about it, and it's safe to
2052 * disable
2053 */
2054 if (adv_instance && !adv_instance->pending)
2055 __hci_req_disable_ext_adv_instance(req, instance);
2056
2057 err = __hci_req_setup_ext_adv_instance(req, instance);
2058 if (err < 0)
2059 return err;
2060
2061 	__hci_req_update_scan_rsp_data(req, instance);
2062 	__hci_req_enable_ext_advertising(req, instance);
2063
2064 return 0;
2065}
2066
2067int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
2068 bool force)
2069{
2070 struct hci_dev *hdev = req->hdev;
2071 struct adv_info *adv_instance = NULL;
2072 u16 timeout;
2073
2074 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2075 	    list_empty(&hdev->adv_instances))
2076 return -EPERM;
2077
2078 if (hdev->adv_instance_timeout)
2079 return -EBUSY;
2080
2081 adv_instance = hci_find_adv_instance(hdev, instance);
2082 if (!adv_instance)
2083 return -ENOENT;
2084
2085 /* A zero timeout means unlimited advertising. As long as there is
2086 * only one instance, duration should be ignored. We still set a timeout
2087 * in case further instances are being added later on.
2088 *
2089 * If the remaining lifetime of the instance is more than the duration
2090 * then the timeout corresponds to the duration, otherwise it will be
2091 * reduced to the remaining instance lifetime.
2092 */
2093 if (adv_instance->timeout == 0 ||
2094 adv_instance->duration <= adv_instance->remaining_time)
2095 timeout = adv_instance->duration;
2096 else
2097 timeout = adv_instance->remaining_time;
2098
2099 /* The remaining time is being reduced unless the instance is being
2100 * advertised without time limit.
2101 */
2102 if (adv_instance->timeout)
2103 adv_instance->remaining_time =
2104 adv_instance->remaining_time - timeout;
2105
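	/* e.g. an instance with duration 2 s and remaining_time 10 s is
	 * advertised for 2 s, leaving 8 s of lifetime; once only 1 s
	 * remains, it is advertised for that final second.
	 */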
2106 /* Only use work for scheduling instances with legacy advertising */
2107 if (!ext_adv_capable(hdev)) {
2108 hdev->adv_instance_timeout = timeout;
2109 queue_delayed_work(hdev->req_workqueue,
2110 &hdev->adv_instance_expire,
2111 msecs_to_jiffies(timeout * 1000));
2112 	}
2113
2114 /* If we're just re-scheduling the same instance again then do not
2115 * execute any HCI commands. This happens when a single instance is
2116 * being advertised.
2117 */
2118 if (!force && hdev->cur_adv_instance == instance &&
2119 hci_dev_test_flag(hdev, HCI_LE_ADV))
2120 return 0;
2121
2122 hdev->cur_adv_instance = instance;
2123 if (ext_adv_capable(hdev)) {
2124 __hci_req_start_ext_adv(req, instance);
2125 } else {
2126 __hci_req_update_adv_data(req, instance);
2127 __hci_req_update_scan_rsp_data(req, instance);
2128 __hci_req_enable_advertising(req);
2129 }
2130
2131 return 0;
2132}
2133
2134static void cancel_adv_timeout(struct hci_dev *hdev)
2135{
2136 if (hdev->adv_instance_timeout) {
2137 hdev->adv_instance_timeout = 0;
2138 cancel_delayed_work(&hdev->adv_instance_expire);
2139 }
2140}
2141
2142/* For a single instance:
2143 * - force == true: The instance will be removed even when its remaining
2144 * lifetime is not zero.
2145 * - force == false: the instance will be deactivated but kept stored unless
2146 * the remaining lifetime is zero.
2147 *
2148 * For instance == 0x00:
2149 * - force == true: All instances will be removed regardless of their timeout
2150 * setting.
2151 * - force == false: Only instances that have a timeout will be removed.
2152 */
2153void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2154 struct hci_request *req, u8 instance,
2155 bool force)
2156{
2157 struct adv_info *adv_instance, *n, *next_instance = NULL;
2158 int err;
2159 u8 rem_inst;
2160
2161 /* Cancel any timeout concerning the removed instance(s). */
2162 if (!instance || hdev->cur_adv_instance == instance)
2163 cancel_adv_timeout(hdev);
2164
2165 /* Get the next instance to advertise BEFORE we remove
2166 * the current one. This can be the same instance again
2167 * if there is only one instance.
2168 */
2169 if (instance && hdev->cur_adv_instance == instance)
2170 next_instance = hci_get_next_instance(hdev, instance);
2171
2172 if (instance == 0x00) {
2173 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2174 list) {
2175 if (!(force || adv_instance->timeout))
2176 continue;
2177
2178 rem_inst = adv_instance->instance;
2179 err = hci_remove_adv_instance(hdev, rem_inst);
2180 if (!err)
2181 				mgmt_advertising_removed(sk, hdev, rem_inst);
2182 		}
2183 } else {
2184 adv_instance = hci_find_adv_instance(hdev, instance);
2185
2186 if (force || (adv_instance && adv_instance->timeout &&
2187 !adv_instance->remaining_time)) {
2188 /* Don't advertise a removed instance. */
2189 if (next_instance &&
2190 next_instance->instance == instance)
2191 next_instance = NULL;
2192
2193 err = hci_remove_adv_instance(hdev, instance);
2194 if (!err)
2195 				mgmt_advertising_removed(sk, hdev, instance);
2196 }
2197 }
2198
2199 if (!req || !hdev_is_powered(hdev) ||
2200 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2201 return;
2202
2203 	if (next_instance && !ext_adv_capable(hdev))
2204 __hci_req_schedule_adv_instance(req, next_instance->instance,
2205 false);
2206}
2207
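/* Editor's note: an illustrative sketch, not part of the original file.
 * Dropping every instance, including ones advertised without a timeout,
 * amounts to passing instance 0x00 with force set.
 */
#if 0
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_clear_adv_instance(hdev, NULL, &req, 0x00, true);
	hci_req_run(&req, NULL);
#endif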
2208static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2209{
2210 struct hci_dev *hdev = req->hdev;
2211
2212 /* If we're advertising or initiating an LE connection we can't
2213 * go ahead and change the random address at this time. This is
2214 * because the eventual initiator address used for the
2215 * subsequently created connection will be undefined (some
2216 * controllers use the new address and others the one we had
2217 * when the operation started).
2218 *
2219 * In this kind of scenario skip the update and let the random
2220 * address be updated at the next cycle.
2221 */
2222 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2223 	    hci_lookup_le_connect(hdev)) {
2224 		BT_DBG("Deferring random address update");
2225 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2226 return;
2227 }
2228
2229 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2230}
2231
2232int hci_update_random_address(struct hci_request *req, bool require_privacy,
2233 			      bool use_rpa, u8 *own_addr_type)
2234{
2235 struct hci_dev *hdev = req->hdev;
2236 int err;
2237
2238 /* If privacy is enabled use a resolvable private address. If
2239 * current RPA has expired or there is something else than
2240 * the current RPA in use, then generate a new one.
2241 */
2242 	if (use_rpa) {
2243 int to;
2244
2245 		/* If the controller supports LL Privacy, use own address
2246 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
2247 		 */
2248 if (use_ll_privacy(hdev))
2249 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2250 else
2251 *own_addr_type = ADDR_LE_DEV_RANDOM;
2252
2253 		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2254 !bacmp(&hdev->random_addr, &hdev->rpa))
2255 return 0;
2256
2257 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2258 if (err < 0) {
2259 			bt_dev_err(hdev, "failed to generate new RPA");
2260 return err;
2261 }
2262
2263 set_random_addr(req, &hdev->rpa);
2264
2265 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2266 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2267
2268 return 0;
2269 }
2270
2271 /* In case of required privacy without resolvable private address,
2272 	 * use a non-resolvable private address. This is useful for active
2273 * scanning and non-connectable advertising.
2274 */
2275 if (require_privacy) {
2276 bdaddr_t nrpa;
2277
2278 while (true) {
2279 /* The non-resolvable private address is generated
2280 			 * from six random bytes with the two most significant
2281 * bits cleared.
2282 */
2283 get_random_bytes(&nrpa, 6);
2284 nrpa.b[5] &= 0x3f;
2285
2286 /* The non-resolvable private address shall not be
2287 * equal to the public address.
2288 */
2289 if (bacmp(&hdev->bdaddr, &nrpa))
2290 break;
2291 }
2292
2293 *own_addr_type = ADDR_LE_DEV_RANDOM;
2294 set_random_addr(req, &nrpa);
2295 return 0;
2296 }
2297
2298 /* If forcing static address is in use or there is no public
2299 * address use the static address as random address (but skip
2300 * the HCI command if the current random address is already the
2301 	 * static one).
2302 *
2303 * In case BR/EDR has been disabled on a dual-mode controller
2304 * and a static address has been configured, then use that
2305 * address instead of the public BR/EDR address.
2306 	 */
2307 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2308 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2309 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2310 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2311 *own_addr_type = ADDR_LE_DEV_RANDOM;
2312 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2313 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2314 &hdev->static_addr);
2315 return 0;
2316 }
2317
2318 /* Neither privacy nor static address is being used so use a
2319 * public address.
2320 */
2321 *own_addr_type = ADDR_LE_DEV_PUBLIC;
2322
2323 return 0;
2324}
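/* Editor's note: a minimal usage sketch, not part of the original file.
 * It mirrors how the active_scan() path further below consumes the
 * helper above, falling back to the public address on failure.
 */
#if 0
	u8 own_addr_type;

	if (hci_update_random_address(req, true, scan_use_rpa(hdev),
				      &own_addr_type) < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;
#endif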
2325
2326static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2327{
2328 struct bdaddr_list *b;
2329
2330 list_for_each_entry(b, &hdev->whitelist, list) {
2331 struct hci_conn *conn;
2332
2333 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2334 if (!conn)
2335 return true;
2336
2337 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2338 return true;
2339 }
2340
2341 return false;
2342}
2343
2344void __hci_req_update_scan(struct hci_request *req)
2345{
2346 struct hci_dev *hdev = req->hdev;
2347 u8 scan;
2348
2349 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2350 return;
2351
2352 if (!hdev_is_powered(hdev))
2353 return;
2354
2355 if (mgmt_powering_down(hdev))
2356 return;
2357
2358 if (hdev->scanning_paused)
2359 return;
2360
2361 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2362 disconnected_whitelist_entries(hdev))
2363 scan = SCAN_PAGE;
2364 else
2365 scan = SCAN_DISABLED;
2366
2367 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2368 scan |= SCAN_INQUIRY;
2369
2370 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2371 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2372 return;
2373
2374 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2375}
2376
2377static int update_scan(struct hci_request *req, unsigned long opt)
2378{
2379 hci_dev_lock(req->hdev);
2380 __hci_req_update_scan(req);
2381 hci_dev_unlock(req->hdev);
2382 return 0;
2383}
2384
2385static void scan_update_work(struct work_struct *work)
2386{
2387 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2388
2389 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2390}
2391
2392static int connectable_update(struct hci_request *req, unsigned long opt)
2393{
2394 struct hci_dev *hdev = req->hdev;
2395
2396 hci_dev_lock(hdev);
2397
2398 __hci_req_update_scan(req);
2399
2400 /* If BR/EDR is not enabled and we disable advertising as a
2401 * by-product of disabling connectable, we need to update the
2402 * advertising flags.
2403 */
2404 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2405 		__hci_req_update_adv_data(req, hdev->cur_adv_instance);
2406
2407 /* Update the advertising parameters if necessary */
2408 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2409 !list_empty(&hdev->adv_instances)) {
2410 if (ext_adv_capable(hdev))
2411 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2412 else
2413 __hci_req_enable_advertising(req);
2414 }
2415
2416 __hci_update_background_scan(req);
2417
2418 hci_dev_unlock(hdev);
2419
2420 return 0;
2421}
2422
2423static void connectable_update_work(struct work_struct *work)
2424{
2425 struct hci_dev *hdev = container_of(work, struct hci_dev,
2426 connectable_update);
2427 u8 status;
2428
2429 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2430 mgmt_set_connectable_complete(hdev, status);
2431}
2432
2433static u8 get_service_classes(struct hci_dev *hdev)
2434{
2435 struct bt_uuid *uuid;
2436 u8 val = 0;
2437
2438 list_for_each_entry(uuid, &hdev->uuids, list)
2439 val |= uuid->svc_hint;
2440
2441 return val;
2442}
2443
2444void __hci_req_update_class(struct hci_request *req)
2445{
2446 struct hci_dev *hdev = req->hdev;
2447 u8 cod[3];
2448
2449 BT_DBG("%s", hdev->name);
2450
2451 if (!hdev_is_powered(hdev))
2452 return;
2453
2454 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2455 return;
2456
2457 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2458 return;
2459
2460 cod[0] = hdev->minor_class;
2461 cod[1] = hdev->major_class;
2462 cod[2] = get_service_classes(hdev);
2463
2464 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2465 cod[1] |= 0x20;
2466
2467 if (memcmp(cod, hdev->dev_class, 3) == 0)
2468 return;
2469
2470 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2471}
2472
2473static void write_iac(struct hci_request *req)
2474{
2475 struct hci_dev *hdev = req->hdev;
2476 struct hci_cp_write_current_iac_lap cp;
2477
2478 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2479 return;
2480
2481 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2482 /* Limited discoverable mode */
2483 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2484 cp.iac_lap[0] = 0x00; /* LIAC */
2485 cp.iac_lap[1] = 0x8b;
2486 cp.iac_lap[2] = 0x9e;
2487 cp.iac_lap[3] = 0x33; /* GIAC */
2488 cp.iac_lap[4] = 0x8b;
2489 cp.iac_lap[5] = 0x9e;
2490 } else {
2491 /* General discoverable mode */
2492 cp.num_iac = 1;
2493 cp.iac_lap[0] = 0x33; /* GIAC */
2494 cp.iac_lap[1] = 0x8b;
2495 cp.iac_lap[2] = 0x9e;
2496 }
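	/* The LAPs above are written least-significant octet first:
	 * GIAC 0x9E8B33 becomes 33 8b 9e and LIAC 0x9E8B00 becomes
	 * 00 8b 9e.
	 */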
2497
2498 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2499 (cp.num_iac * 3) + 1, &cp);
2500}
2501
2502static int discoverable_update(struct hci_request *req, unsigned long opt)
2503{
2504 struct hci_dev *hdev = req->hdev;
2505
2506 hci_dev_lock(hdev);
2507
2508 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2509 write_iac(req);
2510 __hci_req_update_scan(req);
2511 __hci_req_update_class(req);
2512 }
2513
2514 /* Advertising instances don't use the global discoverable setting, so
2515 * only update AD if advertising was enabled using Set Advertising.
2516 */
2517 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2518 		__hci_req_update_adv_data(req, 0x00);
2519
2520 /* Discoverable mode affects the local advertising
2521 * address in limited privacy mode.
2522 */
2523 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2524 if (ext_adv_capable(hdev))
2525 __hci_req_start_ext_adv(req, 0x00);
2526 else
2527 __hci_req_enable_advertising(req);
2528 }
2529 }
2530
2531 hci_dev_unlock(hdev);
2532
2533 return 0;
2534}
2535
2536static void discoverable_update_work(struct work_struct *work)
2537{
2538 struct hci_dev *hdev = container_of(work, struct hci_dev,
2539 discoverable_update);
2540 u8 status;
2541
2542 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2543 mgmt_set_discoverable_complete(hdev, status);
2544}
2545
2546void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2547 u8 reason)
2548{
2549 switch (conn->state) {
2550 case BT_CONNECTED:
2551 case BT_CONFIG:
2552 if (conn->type == AMP_LINK) {
2553 struct hci_cp_disconn_phy_link cp;
2554
2555 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2556 cp.reason = reason;
2557 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2558 &cp);
2559 } else {
2560 struct hci_cp_disconnect dc;
2561
2562 dc.handle = cpu_to_le16(conn->handle);
2563 dc.reason = reason;
2564 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2565 }
2566
2567 conn->state = BT_DISCONN;
2568
2569 break;
2570 case BT_CONNECT:
2571 if (conn->type == LE_LINK) {
2572 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2573 break;
2574 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2575 0, NULL);
2576 } else if (conn->type == ACL_LINK) {
2577 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2578 break;
2579 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2580 6, &conn->dst);
2581 }
2582 break;
2583 case BT_CONNECT2:
2584 if (conn->type == ACL_LINK) {
2585 struct hci_cp_reject_conn_req rej;
2586
2587 bacpy(&rej.bdaddr, &conn->dst);
2588 rej.reason = reason;
2589
2590 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2591 sizeof(rej), &rej);
2592 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2593 struct hci_cp_reject_sync_conn_req rej;
2594
2595 bacpy(&rej.bdaddr, &conn->dst);
2596
2597 /* SCO rejection has its own limited set of
2598 * allowed error values (0x0D-0x0F) which isn't
2599 * compatible with most values passed to this
2600 * function. To be safe hard-code one of the
2601 * values that's suitable for SCO.
2602 */
2603 			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2604
2605 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2606 sizeof(rej), &rej);
2607 }
2608 break;
2609 default:
2610 conn->state = BT_CLOSED;
2611 break;
2612 }
2613}
2614
2615static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2616{
2617 if (status)
2618 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2619}
2620
2621int hci_abort_conn(struct hci_conn *conn, u8 reason)
2622{
2623 struct hci_request req;
2624 int err;
2625
2626 hci_req_init(&req, conn->hdev);
2627
2628 __hci_abort_conn(&req, conn, reason);
2629
2630 err = hci_req_run(&req, abort_conn_complete);
2631 if (err && err != -ENODATA) {
2632 		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2633 return err;
2634 }
2635
2636 return 0;
2637}
2638
2639static int update_bg_scan(struct hci_request *req, unsigned long opt)
2640{
2641 hci_dev_lock(req->hdev);
2642 __hci_update_background_scan(req);
2643 hci_dev_unlock(req->hdev);
2644 	return 0;
2645}
2646
2647static void bg_scan_update(struct work_struct *work)
2648{
2649 struct hci_dev *hdev = container_of(work, struct hci_dev,
2650 bg_scan_update);
2651 struct hci_conn *conn;
2652 u8 status;
2653 int err;
2654
2655 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2656 if (!err)
2657 return;
2658
2659 hci_dev_lock(hdev);
2660
2661 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2662 if (conn)
2663 hci_le_conn_failed(conn, status);
2664
2665 	hci_dev_unlock(hdev);
2666}
2667
2668static int le_scan_disable(struct hci_request *req, unsigned long opt)
2669{
2670 hci_req_add_le_scan_disable(req);
2671 return 0;
2672}
2673
2674static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2675{
2676 	u8 length = opt;
2677 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2678 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2679 	struct hci_cp_inquiry cp;
2680
2681 	BT_DBG("%s", req->hdev->name);
2682
2683 hci_dev_lock(req->hdev);
2684 hci_inquiry_cache_flush(req->hdev);
2685 hci_dev_unlock(req->hdev);
2686
2687 	memset(&cp, 0, sizeof(cp));
2688
2689 if (req->hdev->discovery.limited)
2690 memcpy(&cp.lap, liac, sizeof(cp.lap));
2691 else
2692 memcpy(&cp.lap, giac, sizeof(cp.lap));
2693
2694 	cp.length = length;
2695
2696 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2697
2698 	return 0;
2699}
2700
2701static void le_scan_disable_work(struct work_struct *work)
2702{
2703 struct hci_dev *hdev = container_of(work, struct hci_dev,
2704 le_scan_disable.work);
2705 u8 status;
2706
2707 BT_DBG("%s", hdev->name);
2708
2709 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2710 return;
2711
2712 cancel_delayed_work(&hdev->le_scan_restart);
2713
2714 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2715 if (status) {
2716 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2717 status);
2718 return;
2719 }
2720
2721 hdev->discovery.scan_start = 0;
2722
2723 /* If we were running LE only scan, change discovery state. If
2724 * we were running both LE and BR/EDR inquiry simultaneously,
2725 * and BR/EDR inquiry is already finished, stop discovery,
2726 * otherwise BR/EDR inquiry will stop discovery when finished.
2727 	 * If we will resolve a remote device name, do not change
2728 	 * the discovery state.
2729 */
2730
2731 if (hdev->discovery.type == DISCOV_TYPE_LE)
2732 goto discov_stopped;
2733
2734 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2735 return;
2736
2737 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2738 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2739 hdev->discovery.state != DISCOVERY_RESOLVING)
2740 goto discov_stopped;
2741
2742 return;
2743 }
2744
2745 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2746 HCI_CMD_TIMEOUT, &status);
2747 if (status) {
2748 		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2749 goto discov_stopped;
2750 }
2751
2752 return;
2753
2754discov_stopped:
2755 hci_dev_lock(hdev);
2756 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2757 hci_dev_unlock(hdev);
2758}
2759
2760static int le_scan_restart(struct hci_request *req, unsigned long opt)
2761{
2762 struct hci_dev *hdev = req->hdev;
2763
2764 /* If controller is not scanning we are done. */
2765 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2766 return 0;
2767
2768 if (hdev->scanning_paused) {
2769 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2770 return 0;
2771 }
2772
2773 hci_req_add_le_scan_disable(req);
2774
2775 if (use_ext_scan(hdev)) {
2776 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2777
2778 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2779 ext_enable_cp.enable = LE_SCAN_ENABLE;
2780 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2781
2782 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2783 sizeof(ext_enable_cp), &ext_enable_cp);
2784 } else {
2785 struct hci_cp_le_set_scan_enable cp;
2786
2787 memset(&cp, 0, sizeof(cp));
2788 cp.enable = LE_SCAN_ENABLE;
2789 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2790 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2791 }
2792
2793 return 0;
2794}
2795
2796static void le_scan_restart_work(struct work_struct *work)
2797{
2798 struct hci_dev *hdev = container_of(work, struct hci_dev,
2799 le_scan_restart.work);
7c1fbed2 2800 unsigned long timeout, duration, scan_start, now;
2801 	u8 status;
2802
2803 BT_DBG("%s", hdev->name);
2804
2805 	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2806 	if (status) {
2807 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2808 status);
2809 return;
2810 }
2811
2812 hci_dev_lock(hdev);
2813
2814 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2815 !hdev->discovery.scan_start)
2816 goto unlock;
2817
2818 	/* When the scan was started, the le_scan_disable work was queued
2819 	 * to run 'duration' after scan_start. That work was canceled for
2820 	 * this restart, so queue it again with the remaining timeout to
2821 	 * make sure the scan does not run indefinitely.
2822 */
2823 duration = hdev->discovery.scan_duration;
2824 scan_start = hdev->discovery.scan_start;
2825 now = jiffies;
2826 if (now - scan_start <= duration) {
2827 int elapsed;
2828
2829 if (now >= scan_start)
2830 elapsed = now - scan_start;
2831 else
2832 elapsed = ULONG_MAX - scan_start + now;
2833
2834 timeout = duration - elapsed;
2835 } else {
2836 timeout = 0;
2837 }
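	/* e.g. if 4 s of a 10 s scan window had elapsed before the
	 * restart, the disable work is re-queued to fire after the
	 * remaining 6 s.
	 */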
2838
2839 queue_delayed_work(hdev->req_workqueue,
2840 &hdev->le_scan_disable, timeout);
2841
2842unlock:
2843 hci_dev_unlock(hdev);
2844}
2845
2846static int active_scan(struct hci_request *req, unsigned long opt)
2847{
2848 uint16_t interval = opt;
2849 struct hci_dev *hdev = req->hdev;
2850 	u8 own_addr_type;
2851 /* White list is not used for discovery */
2852 u8 filter_policy = 0x00;
2853 /* Discovery doesn't require controller address resolution */
2854 bool addr_resolv = false;
2855 int err;
2856
2857 BT_DBG("%s", hdev->name);
2858
2859 /* If controller is scanning, it means the background scanning is
2860 * running. Thus, we should temporarily stop it in order to set the
2861 * discovery scanning parameters.
2862 */
2863 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2864 hci_req_add_le_scan_disable(req);
2865
2866 /* All active scans will be done with either a resolvable private
2867 * address (when privacy feature has been enabled) or non-resolvable
2868 * private address.
2869 */
2870 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2871 &own_addr_type);
2872 if (err < 0)
2873 own_addr_type = ADDR_LE_DEV_PUBLIC;
2874
2875 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2876 hdev->le_scan_window_discovery, own_addr_type,
2877 			   filter_policy, addr_resolv);
2878 return 0;
2879}
2880
2881static int interleaved_discov(struct hci_request *req, unsigned long opt)
2882{
2883 int err;
2884
2885 BT_DBG("%s", req->hdev->name);
2886
2887 err = active_scan(req, opt);
2888 if (err)
2889 return err;
2890
2891 	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2892}
2893
2894static void start_discovery(struct hci_dev *hdev, u8 *status)
2895{
2896 unsigned long timeout;
2897
2898 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2899
2900 switch (hdev->discovery.type) {
2901 case DISCOV_TYPE_BREDR:
2902 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2903 hci_req_sync(hdev, bredr_inquiry,
2904 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2905 status);
2906 return;
2907 case DISCOV_TYPE_INTERLEAVED:
2908 /* When running simultaneous discovery, the LE scanning time
2909 		 * should occupy the whole discovery time since BR/EDR inquiry
2910 * and LE scanning are scheduled by the controller.
2911 *
2912 * For interleaving discovery in comparison, BR/EDR inquiry
2913 * and LE scanning are done sequentially with separate
2914 * timeouts.
2915 */
2916 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2917 &hdev->quirks)) {
2918 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2919 /* During simultaneous discovery, we double LE scan
2920 * interval. We must leave some time for the controller
2921 * to do BR/EDR inquiry.
2922 */
2923 hci_req_sync(hdev, interleaved_discov,
2924 				     hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2925 status);
2926 break;
2927 }
2928
2929 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2930 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2931 HCI_CMD_TIMEOUT, status);
2932 break;
2933 case DISCOV_TYPE_LE:
2934 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2935 		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2936 HCI_CMD_TIMEOUT, status);
2937 break;
2938 default:
2939 *status = HCI_ERROR_UNSPECIFIED;
2940 return;
2941 }
2942
2943 if (*status)
2944 return;
2945
2946 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2947
2948 /* When service discovery is used and the controller has a
2949 * strict duplicate filter, it is important to remember the
2950 * start and duration of the scan. This is required for
2951 * restarting scanning during the discovery phase.
2952 */
2953 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2954 hdev->discovery.result_filtering) {
2955 hdev->discovery.scan_start = jiffies;
2956 hdev->discovery.scan_duration = timeout;
2957 }
2958
2959 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2960 timeout);
2961}
2962
2963bool hci_req_stop_discovery(struct hci_request *req)
2964{
2965 struct hci_dev *hdev = req->hdev;
2966 struct discovery_state *d = &hdev->discovery;
2967 struct hci_cp_remote_name_req_cancel cp;
2968 struct inquiry_entry *e;
2969 bool ret = false;
2970
2971 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2972
2973 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2974 if (test_bit(HCI_INQUIRY, &hdev->flags))
2975 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2976
2977 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2978 cancel_delayed_work(&hdev->le_scan_disable);
2979 hci_req_add_le_scan_disable(req);
2980 }
2981
2982 ret = true;
2983 } else {
2984 /* Passive scanning */
2985 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2986 hci_req_add_le_scan_disable(req);
2987 ret = true;
2988 }
2989 }
2990
2991 /* No further actions needed for LE-only discovery */
2992 if (d->type == DISCOV_TYPE_LE)
2993 return ret;
2994
2995 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2996 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2997 NAME_PENDING);
2998 if (!e)
2999 return ret;
3000
3001 bacpy(&cp.bdaddr, &e->data.bdaddr);
3002 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3003 &cp);
3004 ret = true;
3005 }
3006
3007 return ret;
3008}
3009
3010static int stop_discovery(struct hci_request *req, unsigned long opt)
3011{
3012 hci_dev_lock(req->hdev);
3013 hci_req_stop_discovery(req);
3014 hci_dev_unlock(req->hdev);
3015
3016 return 0;
3017}
3018
3019static void discov_update(struct work_struct *work)
3020{
3021 struct hci_dev *hdev = container_of(work, struct hci_dev,
3022 discov_update);
3023 u8 status = 0;
3024
3025 switch (hdev->discovery.state) {
3026 case DISCOVERY_STARTING:
3027 start_discovery(hdev, &status);
3028 mgmt_start_discovery_complete(hdev, status);
3029 if (status)
3030 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3031 else
3032 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3033 break;
3034 case DISCOVERY_STOPPING:
3035 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
3036 mgmt_stop_discovery_complete(hdev, status);
3037 if (!status)
3038 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3039 break;
3040 case DISCOVERY_STOPPED:
3041 default:
3042 return;
3043 }
3044}
3045
3046static void discov_off(struct work_struct *work)
3047{
3048 struct hci_dev *hdev = container_of(work, struct hci_dev,
3049 discov_off.work);
3050
3051 BT_DBG("%s", hdev->name);
3052
3053 hci_dev_lock(hdev);
3054
3055 	/* When the discoverable timeout triggers, just make sure
3056 * the limited discoverable flag is cleared. Even in the case
3057 * of a timeout triggered from general discoverable, it is
3058 * safe to unconditionally clear the flag.
3059 */
3060 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
3061 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
3062 hdev->discov_timeout = 0;
3063
3064 hci_dev_unlock(hdev);
3065
3066 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
3067 mgmt_new_settings(hdev);
3068}
3069
3070static int powered_update_hci(struct hci_request *req, unsigned long opt)
3071{
3072 struct hci_dev *hdev = req->hdev;
3073 u8 link_sec;
3074
3075 hci_dev_lock(hdev);
3076
3077 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
3078 !lmp_host_ssp_capable(hdev)) {
3079 u8 mode = 0x01;
3080
3081 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
3082
3083 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3084 u8 support = 0x01;
3085
3086 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3087 sizeof(support), &support);
3088 }
3089 }
3090
3091 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3092 lmp_bredr_capable(hdev)) {
3093 struct hci_cp_write_le_host_supported cp;
3094
3095 cp.le = 0x01;
3096 cp.simul = 0x00;
3097
3098 /* Check first if we already have the right
3099 * host state (host features set)
3100 */
3101 if (cp.le != lmp_host_le_capable(hdev) ||
3102 cp.simul != lmp_host_le_br_capable(hdev))
3103 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3104 sizeof(cp), &cp);
3105 }
3106
3107 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3108 /* Make sure the controller has a good default for
3109 * advertising data. This also applies to the case
3110 * where BR/EDR was toggled during the AUTO_OFF phase.
3111 */
3112 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3113 list_empty(&hdev->adv_instances)) {
3114 int err;
3115
3116 if (ext_adv_capable(hdev)) {
3117 err = __hci_req_setup_ext_adv_instance(req,
3118 0x00);
3119 if (!err)
3120 __hci_req_update_scan_rsp_data(req,
3121 0x00);
3122 } else {
3123 err = 0;
3124 __hci_req_update_adv_data(req, 0x00);
3125 __hci_req_update_scan_rsp_data(req, 0x00);
3126 }
3127
3128 			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3129 				if (!ext_adv_capable(hdev))
3130 					__hci_req_enable_advertising(req);
3131 				else if (!err)
3132 __hci_req_enable_ext_advertising(req,
3133 0x00);
3134 			}
3135 } else if (!list_empty(&hdev->adv_instances)) {
3136 struct adv_info *adv_instance;
3137
3138 adv_instance = list_first_entry(&hdev->adv_instances,
3139 struct adv_info, list);
3140 			__hci_req_schedule_adv_instance(req,
3141 							adv_instance->instance,
3142 							true);
3143 		}
3144 }
3145
3146 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3147 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3148 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3149 sizeof(link_sec), &link_sec);
3150
3151 if (lmp_bredr_capable(hdev)) {
3152 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3153 __hci_req_write_fast_connectable(req, true);
3154 else
3155 __hci_req_write_fast_connectable(req, false);
3156 __hci_req_update_scan(req);
3157 __hci_req_update_class(req);
3158 __hci_req_update_name(req);
3159 __hci_req_update_eir(req);
3160 }
3161
3162 hci_dev_unlock(hdev);
3163 return 0;
3164}
3165
3166int __hci_req_hci_power_on(struct hci_dev *hdev)
3167{
3168 /* Register the available SMP channels (BR/EDR and LE) only when
3169 * successfully powering on the controller. This late
3170 * registration is required so that LE SMP can clearly decide if
3171 * the public address or static address is used.
3172 */
3173 smp_register(hdev);
3174
3175 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
3176 NULL);
3177}
3178
3179void hci_request_setup(struct hci_dev *hdev)
3180{
3181 	INIT_WORK(&hdev->discov_update, discov_update);
3182 	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3183 	INIT_WORK(&hdev->scan_update, scan_update_work);
3184 	INIT_WORK(&hdev->connectable_update, connectable_update_work);
3185 	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3186 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3187 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3188 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3189 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3190}
3191
3192void hci_request_cancel_all(struct hci_dev *hdev)
3193{
3194 hci_req_sync_cancel(hdev, ENODEV);
3195
3196 	cancel_work_sync(&hdev->discov_update);
3197 	cancel_work_sync(&hdev->bg_scan_update);
3198 	cancel_work_sync(&hdev->scan_update);
3199 	cancel_work_sync(&hdev->connectable_update);
3200 	cancel_work_sync(&hdev->discoverable_update);
3201 	cancel_delayed_work_sync(&hdev->discov_off);
3202 cancel_delayed_work_sync(&hdev->le_scan_disable);
3203 cancel_delayed_work_sync(&hdev->le_scan_restart);
3204
3205 if (hdev->adv_instance_timeout) {
3206 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3207 hdev->adv_instance_timeout = 0;
3208 }
3209}