Bluetooth: hci_sync: Fix not updating privacy_mode
net/bluetooth/hci_sync.c
// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_skb = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
					  u32 plen, const void *param,
					  struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if command needs to be associated with a sock (e.g.
	 * likely mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_cmd_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);

	hci_req_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_cmd_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);
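
/* Editor's illustrative sketch (not part of the original file): a typical
 * caller of __hci_cmd_sync_sk() issues one HCI command and consumes the
 * returned event skb. The opcode is a real one from hci.h, but the
 * surrounding function is hypothetical.
 *
 *	static int read_buffer_size_example(struct hci_dev *hdev)
 *	{
 *		struct sk_buff *skb;
 *
 *		hci_req_sync_lock(hdev);	// __hci_cmd_sync_sk needs req_lock
 *		skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL,
 *					0, HCI_CMD_TIMEOUT, NULL);
 *		hci_req_sync_unlock(hdev);
 *
 *		if (IS_ERR(skb))
 *			return PTR_ERR(skb);
 *		if (!skb)	// status-only completion carries no parameters
 *			return 0;
 *
 *		// ... parse skb->data (struct hci_rp_read_buffer_size) ...
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */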

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* If the command returns a status event, skb will be set to NULL as
	 * there are no parameters; in case of failure, IS_ERR(skb) would have
	 * been true and the actual error would be found with PTR_ERR(skb).
	 */
	if (!skb)
		return 0;

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
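
/* Editor's note (illustrative, not from the original file): the _status
 * variants are the building block used by most helpers below. They return
 * 0 on success, a negative errno on transport failure, or the command's
 * HCI status byte otherwise, so callers reduce to the pattern:
 *
 *	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
 *				    sizeof(enable), &enable, HCI_CMD_TIMEOUT);
 *	if (err)
 *		return err;
 */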

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (entry->destroy)
			entry->destroy(hdev, entry->data, -ECANCELED);

		list_del(&entry->list);
		kfree(entry);
	}
}

void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		cancel_delayed_work_sync(&hdev->cmd_timer);
		cancel_delayed_work_sync(&hdev->ncmd_timer);
		atomic_set(&hdev->cmd_cnt, 1);

		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return -ENODEV;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

	return 0;
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
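
/* Editor's illustrative sketch (not part of the original file): queueing
 * deferred sync work. enable_advertising_sync() later in this file is a
 * real example of such a callback; the kfree-based destroy callback and
 * "ctx" shown here are hypothetical.
 *
 *	static int set_name_sync(struct hci_dev *hdev, void *data)
 *	{
 *		// runs from hci_cmd_sync_work() with req_lock held
 *		return 0;
 *	}
 *
 *	static void set_name_destroy(struct hci_dev *hdev, void *data, int err)
 *	{
 *		kfree(data);	// release context regardless of err
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, set_name_sync, ctx, set_name_destroy);
 */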

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
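
/* Editor's summary (added for clarity, not in the original file): the
 * decision table implemented by adv_use_rpa() above is:
 *
 *	HCI_PRIVACY off                           -> identity address
 *	HCI_PRIVACY on, HCI_LIMITED_PRIVACY off   -> RPA
 *	limited privacy + discoverable + bondable -> identity address
 *	limited privacy otherwise                 -> RPA
 */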

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (rpa) {
		/* If the controller supports LL Privacy, the own address
		 * type is 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
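
/* Editor's illustrative sketch (not part of the original file): a caller
 * preparing non-connectable advertising from sync-command context might
 * request a privacy-friendly address like this; "flags" is a hypothetical
 * set of MGMT_ADV_FLAG_* bits.
 *
 *	u8 own_addr_type;
 *	int err;
 *
 *	err = hci_update_random_address_sync(hdev, true,
 *					     adv_use_rpa(hdev, flags),
 *					     &own_addr_type);
 *	if (err)
 *		return err;
 *	// own_addr_type is now one of ADDR_LE_DEV_PUBLIC,
 *	// ADDR_LE_DEV_RANDOM or ADDR_LE_DEV_RANDOM_RESOLVED.
 */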

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		struct adv_info *adv;

		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info, instead it uses
		 * hdev->random_addr to track its address, so whenever it needs
		 * to be updated this also sets the random address since
		 * hdev->random_addr is shared with the scan state machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv && !adv->pending) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer’s Identity Address and the Peer_Address_Type
	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate their own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if the random address needs to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}
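
/* Editor's summary (added for clarity, not in the original file): the
 * evt_properties selection above maps to PDU types as follows:
 *
 *	connectable              -> ADV_IND (legacy) / connectable ext adv
 *	scannable or scan rsp    -> ADV_SCAN_IND (legacy) / scannable ext adv
 *	otherwise                -> ADV_NONCONN_IND (legacy) / non-conn ext adv
 */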

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_scan_rsp_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu.data);

	pdu.cp.handle = instance;
	pdu.cp.length = len;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}
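
/* Editor's worked example (added for clarity, not in the original file):
 * the HCI Duration field is expressed in 10 ms units, so for an instance
 * with adv->timeout = 5 seconds:
 *
 *	duration      = 5 * MSEC_PER_SEC = 5000 ms
 *	set->duration = 5000 / 10 = 500		// 500 * 10 ms = 5 s on air
 */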

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv_instance;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;
	u8 status;

	if (ext_adv_capable(hdev))
		return hci_enable_ext_advertising_sync(hdev,
						       hdev->cur_adv_instance);

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EINVAL;

	status = hci_disable_advertising_sync(hdev);
	if (status)
		return status;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	status = hci_update_random_address_sync(hdev, !connectable,
						adv_use_rpa(hdev, flags),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		adv_min_interval = adv_instance->min_interval;
		adv_max_interval = adv_instance->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int enable_advertising_sync(struct hci_dev *hdev, void *data)
{
	return hci_enable_advertising_sync(hdev);
}

int hci_enable_advertising(struct hci_dev *hdev)
{
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
}

int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				     struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	err = hci_disable_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
					sizeof(instance), &instance, 0,
					HCI_CMD_TIMEOUT, sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_ext_adv_data cp;
		u8 data[HCI_MAX_EXT_AD_LENGTH];
	} pdu;
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->adv_data_changed)
			return 0;
	}

	len = eir_create_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				    sizeof(pdu.cp) + len, &pdu.cp,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	/* Update data if the command succeeds */
	if (adv) {
		adv->adv_data_changed = false;
	} else {
		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;
	}

	return 0;
}

static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return 0;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_adv_data_sync(hdev, instance);

	return hci_set_adv_data_sync(hdev, instance);
}

int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
				   bool force)
{
	struct adv_info *adv = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		timeout = adv->duration;
	else
		timeout = adv->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv->timeout)
		adv->remaining_time = adv->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;

	return hci_start_adv_sync(hdev, instance);
}

static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
{
	int err;

	if (!ext_adv_capable(hdev))
		return 0;

	/* Disable instance 0x00 to disable all instances */
	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
	if (err)
		return err;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}

static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
{
	struct adv_info *adv, *n;

	if (ext_adv_capable(hdev))
		/* Remove all existing sets */
		return hci_clear_adv_sets_sync(hdev, sk);

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	/* Cleanup non-ext instances */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance = adv->instance;
		int err;

		if (!(force || adv->timeout))
			continue;

		err = hci_remove_adv_instance(hdev, instance);
		if (!err)
			mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
			       struct sock *sk)
{
	int err;

	/* If we use extended advertising, instance has to be removed first. */
	if (ext_adv_capable(hdev))
		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);

	/* This is safe as long as there is no command sent while the lock is
	 * held.
	 */
	hci_dev_lock(hdev);

	err = hci_remove_adv_instance(hdev, instance);
	if (!err)
		mgmt_advertising_removed(sk, hdev, instance);

	hci_dev_unlock(hdev);

	return err;
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *next = NULL;
	int err;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (hdev->cur_adv_instance == instance)
		next = hci_get_next_instance(hdev, instance);

	if (!instance) {
		err = hci_clear_adv_sync(hdev, sk, force);
		if (err)
			return err;
	} else {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (force || (adv && adv->timeout && !adv->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next && next->instance == instance)
				next = NULL;

			err = hci_remove_adv_sync(hdev, instance, sk);
			if (err)
				return err;
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next && !ext_adv_capable(hdev))
		hci_schedule_adv_instance_sync(hdev, next->instance, false);

	return 0;
}

int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
{
	struct hci_cp_read_rssi cp;

	cp.handle = handle;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
{
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
				     sizeof(*cp), cp, HCI_CMD_TIMEOUT);
}

int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
{
	struct hci_cp_read_tx_power cp;

	cp.handle = handle;
	cp.type = type;
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
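
/* Editor's illustrative sketch (not part of the original file): a caller
 * with an existing connection could read its current transmit power level
 * like this; "conn" is a hypothetical struct hci_conn pointer, and type
 * 0x00 requests the current (rather than maximum) TX power.
 *
 *	err = hci_read_tx_power_sync(hdev, cpu_to_le16(conn->handle), 0x00);
 */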

int hci_disable_advertising_sync(struct hci_dev *hdev)
{
	u8 enable = 0x00;

	/* If controller is not advertising we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_disable_ext_adv_instance_sync(hdev, 0x00);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
					   u8 filter_dup)
{
	struct hci_cp_le_set_ext_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;
	cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup)
{
	struct hci_cp_le_set_scan_enable cp;

	if (use_ext_scan(hdev))
		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);

	memset(&cp, 0, sizeof(cp));
	cp.enable = val;
	cp.filter_dup = filter_dup;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
{
	if (!use_ll_privacy(hdev))
		return 0;

	/* If controller is not/already resolving we are done. */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if an interleaved scan was started by this call,
 * false otherwise.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		hci_start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

/* Removes connection from the resolving list if needed. */
static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_resolv_list cp;
	struct bdaddr_list_with_irk *entry;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Check if the IRK has been programmed */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
						bdaddr_type);
	if (!entry)
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
				       bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;
	int err;

	/* Check if device is on accept list before removing it */
	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
		return 0;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	/* Ignore errors when removing from the resolving list as it is
	 * likely that the device was never added.
	 */
	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
		return err;
	}

	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

/* Adds connection to the resolving list if needed.
 * Setting params to NULL programs the local hdev->irk.
 */
static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
					struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_resolv_list cp;
	struct smp_irk *irk;
	struct bdaddr_list_with_irk *entry;

	if (!use_ll_privacy(hdev))
		return 0;

	/* Attempt to program local identity address, type and irk if params is
	 * NULL.
	 */
	if (!params) {
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return 0;

		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
		memcpy(cp.peer_irk, hdev->irk, 16);
		goto done;
	}

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	/* Check if the IRK has _not_ been programmed yet. */
	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
						&params->addr,
						params->addr_type);
	if (entry)
		return 0;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);
	memcpy(cp.peer_irk, irk->val, 16);

	/* Default privacy mode is always Network */
	params->privacy_mode = HCI_NETWORK_PRIVACY;

done:
	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		memcpy(cp.local_irk, hdev->irk, 16);
	else
		memset(cp.local_irk, 0, 16);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Set Device Privacy Mode. */
static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
					struct hci_conn_params *params)
{
	struct hci_cp_le_set_privacy_mode cp;
	struct smp_irk *irk;

	/* If device privacy mode has already been set there is nothing to do */
	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
		return 0;

	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
	 * indicates that LL Privacy has been enabled and
	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
	 */
	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
		return 0;

	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
	if (!irk)
		return 0;

	memset(&cp, 0, sizeof(cp));
	cp.bdaddr_type = irk->addr_type;
	bacpy(&cp.bdaddr, &irk->bdaddr);
	cp.mode = HCI_DEVICE_PRIVACY;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Adds connection to allow list if needed; if the device uses an RPA (has
 * an IRK) this attempts to program the device in the resolving list as well
 * and properly set the privacy mode.
 */
static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
				       struct hci_conn_params *params,
				       u8 *num_entries)
{
	struct hci_cp_le_add_to_accept_list cp;
	int err;

	/* During suspend, only wakeable devices can be in acceptlist */
	if (hdev->suspended &&
	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -ENOSPC;

	/* Accept list can not be used with RPAs */
	if (!use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		return -EINVAL;

	/* Attempt to program the device in the resolving list first to avoid
	 * having to rollback in case it fails; since the resolving list is
	 * dynamic it can probably be smaller than the accept list.
	 */
	err = hci_le_add_resolve_list_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
		return err;
	}

	/* Set Privacy Mode */
	err = hci_le_set_privacy_mode_sync(hdev, params);
	if (err) {
		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
		return err;
	}

	/* Check if already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err) {
		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
		/* Rollback the device from the resolving list */
		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
		return err;
	}

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
		   cp.bdaddr_type);

	return 0;
}

/* This function disables/pauses all advertising instances */
static int hci_pause_advertising_sync(struct hci_dev *hdev)
{
	int err;
	int old_state;

	/* If advertising has already been paused there is nothing to do. */
	if (hdev->advertising_paused)
		return 0;

	bt_dev_dbg(hdev, "Pausing directed advertising");

	/* Stop directed advertising */
	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
	if (old_state) {
		/* When discoverable timeout triggers, then just make sure
		 * the limited discoverable flag is cleared. Even in the case
		 * of a timeout triggered from general discoverable, it is
		 * safe to unconditionally clear the flag.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hdev->discov_timeout = 0;
	}

	bt_dev_dbg(hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	err = hci_disable_advertising_sync(hdev);
	if (err)
		return err;

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(hdev))
		cancel_adv_timeout(hdev);

	hdev->advertising_paused = true;
	hdev->advertising_old_state = old_state;

	return 0;
}

/* This function enables all user advertising instances */
static int hci_resume_advertising_sync(struct hci_dev *hdev)
{
	struct adv_info *adv, *tmp;
	int err;

	/* If advertising has not been paused there is nothing to do. */
	if (!hdev->advertising_paused)
		return 0;

	/* Resume directed advertising */
	hdev->advertising_paused = false;
	if (hdev->advertising_old_state) {
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
		hdev->advertising_old_state = 0;
	}

	bt_dev_dbg(hdev, "Resuming advertising instances");

	if (ext_adv_capable(hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
			err = hci_enable_ext_advertising_sync(hdev,
							      adv->instance);
			if (!err)
				continue;

			/* If the instance cannot be resumed remove it */
			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
							 NULL);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		err = hci_schedule_adv_instance_sync(hdev,
						     hdev->cur_adv_instance,
						     true);
	}

	hdev->advertising_paused = false;

	return err;
}

struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
					     bool extended, struct sock *sk)
{
	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
		HCI_OP_READ_LOCAL_OOB_DATA;

	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
}
1846
ad383c2c
LAD
1847/* Device must not be scanning when updating the accept list.
1848 *
1849 * Update is done using the following sequence:
1850 *
1851 * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
1852 * Remove Devices From Accept List ->
1853 * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
1854 * Add Devices to Accept List ->
1855 * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
1856 * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
1857 * Enable Scanning
1858 *
1859 * In case of failure advertising shall be restored to its original state and
1860 * return would disable accept list since either accept or resolving list could
1861 * not be programmed.
1862 *
1863 */
e8907f76
LAD
1864static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
1865{
1866 struct hci_conn_params *params;
1867 struct bdaddr_list *b, *t;
1868 u8 num_entries = 0;
1869 bool pend_conn, pend_report;
80740ebb 1870 u8 filter_policy;
ad383c2c
LAD
1871 int err;
1872
1873 /* Pause advertising if resolving list can be used as controllers are
1874 * cannot accept resolving list modifications while advertising.
e8907f76 1875 */
ad383c2c
LAD
1876 if (use_ll_privacy(hdev)) {
1877 err = hci_pause_advertising_sync(hdev);
1878 if (err) {
1879 bt_dev_err(hdev, "pause advertising failed: %d", err);
1880 return 0x00;
1881 }
1882 }
e8907f76 1883
ad383c2c
LAD
1884 /* Disable address resolution while reprogramming accept list since
1885 * devices that do have an IRK will be programmed in the resolving list
1886 * when LL Privacy is enabled.
1887 */
1888 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
1889 if (err) {
1890 bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
1891 goto done;
1892 }
e8907f76
LAD
1893
1894 /* Go through the current accept list programmed into the
1895 * controller one by one and check if that address is still
1896 * in the list of pending connections or list of devices to
1897 * report. If not present in either list, then remove it from
1898 * the controller.
1899 */
1900 list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
1901 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
1902 &b->bdaddr,
1903 b->bdaddr_type);
1904 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
1905 &b->bdaddr,
1906 b->bdaddr_type);
1907
1908 /* If the device is not likely to connect or report,
1909 * remove it from the acceptlist.
1910 */
1911 if (!pend_conn && !pend_report) {
1912 hci_le_del_accept_list_sync(hdev, &b->bdaddr,
1913 b->bdaddr_type);
1914 continue;
1915 }
1916
e8907f76
LAD
1917 num_entries++;
1918 }
1919
1920 /* Since all no longer valid accept list entries have been
1921 * removed, walk through the list of pending connections
1922 * and ensure that any new device gets programmed into
1923 * the controller.
1924 *
1925	 * If the list of devices is larger than the number of
1926	 * available accept list entries in the controller, then
1927	 * just abort and return a filter policy value to not use the
1928 * accept list.
1929 */
1930 list_for_each_entry(params, &hdev->pend_le_conns, action) {
ad383c2c
LAD
1931 err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
1932 if (err)
1933 goto done;
e8907f76
LAD
1934 }
1935
1936 /* After adding all new pending connections, walk through
1937 * the list of pending reports and also add these to the
1938 * accept list if there is still space. Abort if space runs out.
1939 */
1940 list_for_each_entry(params, &hdev->pend_le_reports, action) {
ad383c2c
LAD
1941 err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
1942 if (err)
1943 goto done;
e8907f76
LAD
1944 }
1945
1946 /* Use the allowlist unless the following conditions are all true:
1947 * - We are not currently suspending
1948 * - There are 1 or more ADV monitors registered and it's not offloaded
1949 * - Interleaved scanning is not currently using the allowlist
1950 */
1951 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
1952 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
1953 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
ad383c2c
LAD
1954 err = -EINVAL;
1955
1956done:
80740ebb
LAD
1957 filter_policy = err ? 0x00 : 0x01;
1958
ad383c2c
LAD
1959 /* Enable address resolution when LL Privacy is enabled. */
1960 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
1961 if (err)
1962 bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
1963
1964 /* Resume advertising if it was paused */
1965 if (use_ll_privacy(hdev))
1966 hci_resume_advertising_sync(hdev);
e8907f76
LAD
1967
1968 /* Select filter policy to use accept list */
80740ebb 1969 return filter_policy;
e8907f76
LAD
1970}
1971
1972/* Returns true if an LE connection is in the scanning state */
1973static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
1974{
1975 struct hci_conn_hash *h = &hdev->conn_hash;
1976 struct hci_conn *c;
1977
1978 rcu_read_lock();
1979
1980 list_for_each_entry_rcu(c, &h->list, list) {
1981 if (c->type == LE_LINK && c->state == BT_CONNECT &&
1982 test_bit(HCI_CONN_SCANNING, &c->flags)) {
1983 rcu_read_unlock();
1984 return true;
1985 }
1986 }
1987
1988 rcu_read_unlock();
1989
1990 return false;
1991}
1992
1993static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
1994 u16 interval, u16 window,
1995 u8 own_addr_type, u8 filter_policy)
1996{
1997 struct hci_cp_le_set_ext_scan_params *cp;
1998 struct hci_cp_le_scan_phy_params *phy;
1999 u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2000 u8 num_phy = 0;
2001
2002 cp = (void *)data;
2003 phy = (void *)cp->data;
2004
2005 memset(data, 0, sizeof(data));
2006
2007 cp->own_addr_type = own_addr_type;
2008 cp->filter_policy = filter_policy;
2009
2010 if (scan_1m(hdev) || scan_2m(hdev)) {
2011 cp->scanning_phys |= LE_SCAN_PHY_1M;
2012
2013 phy->type = type;
2014 phy->interval = cpu_to_le16(interval);
2015 phy->window = cpu_to_le16(window);
2016
2017 num_phy++;
2018 phy++;
2019 }
2020
2021 if (scan_coded(hdev)) {
2022 cp->scanning_phys |= LE_SCAN_PHY_CODED;
2023
2024 phy->type = type;
2025 phy->interval = cpu_to_le16(interval);
2026 phy->window = cpu_to_le16(window);
2027
2028 num_phy++;
2029 phy++;
2030 }
2031
2032 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2033 sizeof(*cp) + sizeof(*phy) * num_phy,
2034 data, HCI_CMD_TIMEOUT);
2035}
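
/* Note: LE Set Extended Scan Parameters is a variable-length command; one
 * struct hci_cp_le_scan_phy_params follows the base parameters for each bit
 * set in scanning_phys (1M first, then Coded), which is why the length above
 * is sizeof(*cp) + sizeof(*phy) * num_phy.
 */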
2036
2037static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2038 u16 interval, u16 window,
2039 u8 own_addr_type, u8 filter_policy)
2040{
2041 struct hci_cp_le_set_scan_param cp;
2042
2043 if (use_ext_scan(hdev))
2044 return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2045 window, own_addr_type,
2046 filter_policy);
2047
2048 memset(&cp, 0, sizeof(cp));
2049 cp.type = type;
2050 cp.interval = cpu_to_le16(interval);
2051 cp.window = cpu_to_le16(window);
2052 cp.own_address_type = own_addr_type;
2053 cp.filter_policy = filter_policy;
2054
2055 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2056 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2057}
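
/* Informational: scan interval and window values are in units of 0.625 ms
 * per the Core spec, so e.g. an interval of 0x0060 is 96 * 0.625 ms = 60 ms
 * and a window of 0x0030 is 48 * 0.625 ms = 30 ms.
 */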
2058
2059static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
abfeea47
LAD
2060 u16 window, u8 own_addr_type, u8 filter_policy,
2061 u8 filter_dup)
e8907f76
LAD
2062{
2063 int err;
2064
2065 if (hdev->scanning_paused) {
2066 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2067 return 0;
2068 }
2069
e8907f76
LAD
2070 err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2071 own_addr_type, filter_policy);
2072 if (err)
2073 return err;
2074
abfeea47 2075 return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
e8907f76
LAD
2076}
2077
27592ca1 2078static int hci_passive_scan_sync(struct hci_dev *hdev)
e8907f76
LAD
2079{
2080 u8 own_addr_type;
2081 u8 filter_policy;
2082 u16 window, interval;
ad383c2c 2083 int err;
e8907f76
LAD
2084
2085 if (hdev->scanning_paused) {
2086 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2087 return 0;
2088 }
2089
ad383c2c
LAD
2090 err = hci_scan_disable_sync(hdev);
2091 if (err) {
2092 bt_dev_err(hdev, "disable scanning failed: %d", err);
2093 return err;
2094 }
2095
e8907f76
LAD
2096	/* Set require_privacy to false since no SCAN_REQ are sent
2097	 * during passive scanning. Not using a non-resolvable address
2098 * here is important so that peer devices using direct
2099 * advertising with our address will be correctly reported
2100 * by the controller.
2101 */
2102 if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2103 &own_addr_type))
2104 return 0;
2105
2106 if (hdev->enable_advmon_interleave_scan &&
2107 hci_update_interleaved_scan_sync(hdev))
2108 return 0;
2109
2110 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
ad383c2c 2111
e8907f76
LAD
2112 /* Adding or removing entries from the accept list must
2113 * happen before enabling scanning. The controller does
2114 * not allow accept list modification while scanning.
2115 */
2116 filter_policy = hci_update_accept_list_sync(hdev);
2117
2118 /* When the controller is using random resolvable addresses and
2119 * with that having LE privacy enabled, then controllers with
2120 * Extended Scanner Filter Policies support can now enable support
2121 * for handling directed advertising.
2122 *
2123	 * So instead of using filter policies 0x00 (no acceptlist)
2124 * and 0x01 (acceptlist enabled) use the new filter policies
2125 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
2126 */
2127 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
2128 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
2129 filter_policy |= 0x02;
2130
2131 if (hdev->suspended) {
2132 window = hdev->le_scan_window_suspend;
2133 interval = hdev->le_scan_int_suspend;
e8907f76
LAD
2134 } else if (hci_is_le_conn_scanning(hdev)) {
2135 window = hdev->le_scan_window_connect;
2136 interval = hdev->le_scan_int_connect;
2137 } else if (hci_is_adv_monitoring(hdev)) {
2138 window = hdev->le_scan_window_adv_monitor;
2139 interval = hdev->le_scan_int_adv_monitor;
2140 } else {
2141 window = hdev->le_scan_window;
2142 interval = hdev->le_scan_interval;
2143 }
2144
2145 bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
2146
2147 return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
abfeea47
LAD
2148 own_addr_type, filter_policy,
2149 LE_SCAN_FILTER_DUP_ENABLE);
e8907f76
LAD
2150}
2151
2152/* This function controls the passive scanning based on hdev->pend_le_conns
2153 * list. If there are pending LE connections we start the background scanning,
ad383c2c
LAD
2154 * otherwise we stop it in the following sequence:
2155 *
2156 * If there are devices to scan:
2157 *
2158 * Disable Scanning -> Update Accept List ->
2159 * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
2160 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
2161 * Enable Scanning
2162 *
2163 * Otherwise:
2164 *
2165 * Disable Scanning
e8907f76
LAD
2166 */
2167int hci_update_passive_scan_sync(struct hci_dev *hdev)
2168{
2169 int err;
2170
2171 if (!test_bit(HCI_UP, &hdev->flags) ||
2172 test_bit(HCI_INIT, &hdev->flags) ||
2173 hci_dev_test_flag(hdev, HCI_SETUP) ||
2174 hci_dev_test_flag(hdev, HCI_CONFIG) ||
2175 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
2176 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2177 return 0;
2178
2179 /* No point in doing scanning if LE support hasn't been enabled */
2180 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2181 return 0;
2182
2183 /* If discovery is active don't interfere with it */
2184 if (hdev->discovery.state != DISCOVERY_STOPPED)
2185 return 0;
2186
2187 /* Reset RSSI and UUID filters when starting background scanning
2188 * since these filters are meant for service discovery only.
2189 *
2190 * The Start Discovery and Start Service Discovery operations
2191	 * ensure that proper values are set for the RSSI threshold and
2192 * filter list. So it is safe to just reset them here.
2193 */
2194 hci_discovery_filter_clear(hdev);
2195
2196 bt_dev_dbg(hdev, "ADV monitoring is %s",
2197 hci_is_adv_monitoring(hdev) ? "on" : "off");
2198
2199 if (list_empty(&hdev->pend_le_conns) &&
2200 list_empty(&hdev->pend_le_reports) &&
2201 !hci_is_adv_monitoring(hdev)) {
2202		/* If there are no pending LE connections, no devices
2203		 * to be scanned for and no ADV monitors, we should stop the
2204 * background scanning.
2205 */
2206
2207 bt_dev_dbg(hdev, "stopping background scanning");
2208
ad383c2c 2209 err = hci_scan_disable_sync(hdev);
e8907f76
LAD
2210 if (err)
2211 bt_dev_err(hdev, "stop background scanning failed: %d",
2212 err);
2213 } else {
2214 /* If there is at least one pending LE connection, we should
2215 * keep the background scan running.
2216 */
2217
2218 /* If controller is connecting, we should not start scanning
2219 * since some controllers are not able to scan and connect at
2220 * the same time.
2221 */
2222 if (hci_lookup_le_connect(hdev))
2223 return 0;
2224
e8907f76
LAD
2225 bt_dev_dbg(hdev, "start background scanning");
2226
2227 err = hci_passive_scan_sync(hdev);
2228 if (err)
2229 bt_dev_err(hdev, "start background scanning failed: %d",
2230 err);
2231 }
2232
2233 return err;
2234}
ad383c2c
LAD
2235
2236static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
2237{
2238 return hci_update_passive_scan_sync(hdev);
2239}
2240
2241int hci_update_passive_scan(struct hci_dev *hdev)
2242{
5bee2fd6
LAD
2243 /* Only queue if it would have any effect */
2244 if (!test_bit(HCI_UP, &hdev->flags) ||
2245 test_bit(HCI_INIT, &hdev->flags) ||
2246 hci_dev_test_flag(hdev, HCI_SETUP) ||
2247 hci_dev_test_flag(hdev, HCI_CONFIG) ||
2248 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
2249 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2250 return 0;
2251
ad383c2c
LAD
2252 return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
2253}
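
/* Illustrative sketch (hypothetical names, not part of the file): any
 * synchronous helper can be deferred with the same wrapper pattern used by
 * hci_update_passive_scan() above, since only the cmd_sync work context may
 * block on HCI command completion.
 */
static int example_work_sync(struct hci_dev *hdev, void *data)
{
	/* Runs on the cmd_sync work queue, where blocking is allowed */
	return hci_update_passive_scan_sync(hdev);
}

static int example_queue_work(struct hci_dev *hdev)
{
	/* Callable from non-blocking context; defers to cmd_sync work */
	return hci_cmd_sync_queue(hdev, example_work_sync, NULL, NULL);
}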
cf75ad8b 2254
2f2eb0c9 2255int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
cf75ad8b 2256{
2f2eb0c9
BG
2257 int err;
2258
cf75ad8b
LAD
2259 if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
2260 return 0;
2261
2f2eb0c9 2262 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
cf75ad8b 2263 sizeof(val), &val, HCI_CMD_TIMEOUT);
2f2eb0c9
BG
2264
2265 if (!err) {
2266 if (val) {
2267 hdev->features[1][0] |= LMP_HOST_SC;
2268 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
2269 } else {
2270 hdev->features[1][0] &= ~LMP_HOST_SC;
2271 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
2272 }
2273 }
2274
2275 return err;
cf75ad8b
LAD
2276}
2277
3244845c 2278int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
cf75ad8b
LAD
2279{
2280 int err;
2281
2282 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
2283 lmp_host_ssp_capable(hdev))
2284 return 0;
2285
3244845c
BG
2286 if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
2287 __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2288 sizeof(mode), &mode, HCI_CMD_TIMEOUT);
2289 }
2290
cf75ad8b
LAD
2291 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
2292 sizeof(mode), &mode, HCI_CMD_TIMEOUT);
2293 if (err)
2294 return err;
2295
2296 return hci_write_sc_support_sync(hdev, 0x01);
2297}
2298
d81a494c 2299int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
cf75ad8b
LAD
2300{
2301 struct hci_cp_write_le_host_supported cp;
2302
2303 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2304 !lmp_bredr_capable(hdev))
2305 return 0;
2306
2307 /* Check first if we already have the right host state
2308 * (host features set)
2309 */
2310 if (le == lmp_host_le_capable(hdev) &&
2311 simul == lmp_host_le_br_capable(hdev))
2312 return 0;
2313
2314 memset(&cp, 0, sizeof(cp));
2315
2316 cp.le = le;
2317 cp.simul = simul;
2318
2319 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2320 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2321}
2322
2323static int hci_powered_update_adv_sync(struct hci_dev *hdev)
2324{
2325 struct adv_info *adv, *tmp;
2326 int err;
2327
2328 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2329 return 0;
2330
2331	/* If RPA Resolution has not been enabled yet it means the
2332 * resolving list is empty and we should attempt to program the
2333 * local IRK in order to support using own_addr_type
2334 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
2335 */
2336 if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2337 hci_le_add_resolve_list_sync(hdev, NULL);
2338 hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2339 }
2340
2341 /* Make sure the controller has a good default for
2342 * advertising data. This also applies to the case
2343 * where BR/EDR was toggled during the AUTO_OFF phase.
2344 */
2345 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2346 list_empty(&hdev->adv_instances)) {
2347 if (ext_adv_capable(hdev)) {
2348 err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2349 if (!err)
2350 hci_update_scan_rsp_data_sync(hdev, 0x00);
2351 } else {
2352 err = hci_update_adv_data_sync(hdev, 0x00);
2353 if (!err)
2354 hci_update_scan_rsp_data_sync(hdev, 0x00);
2355 }
2356
2357 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2358 hci_enable_advertising_sync(hdev);
2359 }
2360
2361 /* Call for each tracked instance to be scheduled */
2362 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
2363 hci_schedule_adv_instance_sync(hdev, adv->instance, true);
2364
2365 return 0;
2366}
2367
2368static int hci_write_auth_enable_sync(struct hci_dev *hdev)
2369{
2370 u8 link_sec;
2371
2372 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2373 if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
2374 return 0;
2375
2376 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
2377 sizeof(link_sec), &link_sec,
2378 HCI_CMD_TIMEOUT);
2379}
2380
353a0249 2381int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
cf75ad8b
LAD
2382{
2383 struct hci_cp_write_page_scan_activity cp;
2384 u8 type;
2385 int err = 0;
2386
2387 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2388 return 0;
2389
2390 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
2391 return 0;
2392
2393 memset(&cp, 0, sizeof(cp));
2394
2395 if (enable) {
2396 type = PAGE_SCAN_TYPE_INTERLACED;
2397
2398		/* 160 msec page scan interval (0x0100 * 0.625 ms = 160 ms) */
2399 cp.interval = cpu_to_le16(0x0100);
2400 } else {
2401 type = hdev->def_page_scan_type;
2402 cp.interval = cpu_to_le16(hdev->def_page_scan_int);
2403 }
2404
2405 cp.window = cpu_to_le16(hdev->def_page_scan_window);
2406
2407 if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
2408 __cpu_to_le16(hdev->page_scan_window) != cp.window) {
2409 err = __hci_cmd_sync_status(hdev,
2410 HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
2411 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2412 if (err)
2413 return err;
2414 }
2415
2416 if (hdev->page_scan_type != type)
2417 err = __hci_cmd_sync_status(hdev,
2418 HCI_OP_WRITE_PAGE_SCAN_TYPE,
2419 sizeof(type), &type,
2420 HCI_CMD_TIMEOUT);
2421
2422 return err;
2423}
2424
2425static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2426{
2427 struct bdaddr_list *b;
2428
2429 list_for_each_entry(b, &hdev->accept_list, list) {
2430 struct hci_conn *conn;
2431
2432 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2433 if (!conn)
2434 return true;
2435
2436 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2437 return true;
2438 }
2439
2440 return false;
2441}
2442
2443static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
2444{
2445 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
2446 sizeof(val), &val,
2447 HCI_CMD_TIMEOUT);
2448}
2449
451d95a9 2450int hci_update_scan_sync(struct hci_dev *hdev)
cf75ad8b
LAD
2451{
2452 u8 scan;
2453
2454 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2455 return 0;
2456
2457 if (!hdev_is_powered(hdev))
2458 return 0;
2459
2460 if (mgmt_powering_down(hdev))
2461 return 0;
2462
2463 if (hdev->scanning_paused)
2464 return 0;
2465
2466 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2467 disconnected_accept_list_entries(hdev))
2468 scan = SCAN_PAGE;
2469 else
2470 scan = SCAN_DISABLED;
2471
2472 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2473 scan |= SCAN_INQUIRY;
2474
2475 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2476 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2477 return 0;
2478
2479 return hci_write_scan_enable_sync(hdev, scan);
2480}
2481
6f6ff38a 2482int hci_update_name_sync(struct hci_dev *hdev)
cf75ad8b
LAD
2483{
2484 struct hci_cp_write_local_name cp;
2485
2486 memset(&cp, 0, sizeof(cp));
2487
2488 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2489
2490 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
2491 sizeof(cp), &cp,
2492 HCI_CMD_TIMEOUT);
2493}
2494
2495/* This function performs the powered update HCI command sequence after the HCI
2496 * init sequence, which ends up resetting all states; the sequence is as follows:
2497 *
2498 * HCI_SSP_ENABLED(Enable SSP)
2499 * HCI_LE_ENABLED(Enable LE)
2500 * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
2501 * Update adv data)
2502 * Enable Authentication
2503 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
2504 * Set Name -> Set EIR)
2505 */
2506int hci_powered_update_sync(struct hci_dev *hdev)
2507{
2508 int err;
2509
2510 /* Register the available SMP channels (BR/EDR and LE) only when
2511 * successfully powering on the controller. This late
2512 * registration is required so that LE SMP can clearly decide if
2513 * the public address or static address is used.
2514 */
2515 smp_register(hdev);
2516
2517 err = hci_write_ssp_mode_sync(hdev, 0x01);
2518 if (err)
2519 return err;
2520
2521 err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
2522 if (err)
2523 return err;
2524
2525 err = hci_powered_update_adv_sync(hdev);
2526 if (err)
2527 return err;
2528
2529 err = hci_write_auth_enable_sync(hdev);
2530 if (err)
2531 return err;
2532
2533 if (lmp_bredr_capable(hdev)) {
2534 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2535 hci_write_fast_connectable_sync(hdev, true);
2536 else
2537 hci_write_fast_connectable_sync(hdev, false);
2538 hci_update_scan_sync(hdev);
2539 hci_update_class_sync(hdev);
2540 hci_update_name_sync(hdev);
2541 hci_update_eir_sync(hdev);
2542 }
2543
2544 return 0;
2545}
2546
d0b13706
LAD
2547/**
2548 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
2549 * (BD_ADDR) for a HCI device from
2550 * a firmware node property.
2551 * @hdev: The HCI device
cf75ad8b 2552 *
d0b13706
LAD
2553 * Search the firmware node for 'local-bd-address'.
2554 *
2555 * All-zero BD addresses are rejected, because those could be properties
2556 * that exist in the firmware tables, but were not updated by the firmware. For
2557 * example, the DTS could define 'local-bd-address' with an all-zero BD address.
cf75ad8b 2558 */
d0b13706 2559static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
cf75ad8b 2560{
d0b13706
LAD
2561 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
2562 bdaddr_t ba;
2563 int ret;
cf75ad8b 2564
d0b13706
LAD
2565 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
2566 (u8 *)&ba, sizeof(ba));
2567 if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
2568 return;
cf75ad8b 2569
d0b13706
LAD
2570 bacpy(&hdev->public_addr, &ba);
2571}
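
/* Example firmware node (devicetree), illustrative only. Per the DT bindings
 * the property is little-endian, so the bytes below encode the address
 * 55:44:33:22:11:00:
 *
 *	bluetooth {
 *		local-bd-address = [ 00 11 22 33 44 55 ];
 *	};
 */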
cf75ad8b 2572
d0b13706
LAD
2573struct hci_init_stage {
2574 int (*func)(struct hci_dev *hdev);
2575};
cf75ad8b 2576
d0b13706
LAD
2577/* Run init stage NULL terminated function table */
2578static int hci_init_stage_sync(struct hci_dev *hdev,
2579 const struct hci_init_stage *stage)
2580{
2581 size_t i;
cf75ad8b 2582
d0b13706
LAD
2583 for (i = 0; stage[i].func; i++) {
2584 int err;
cf75ad8b 2585
d0b13706
LAD
2586 err = stage[i].func(hdev);
2587 if (err)
2588 return err;
cf75ad8b
LAD
2589 }
2590
2591 return 0;
2592}
2593
d0b13706
LAD
2594/* Read Local Version */
2595static int hci_read_local_version_sync(struct hci_dev *hdev)
cf75ad8b 2596{
d0b13706
LAD
2597 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
2598 0, NULL, HCI_CMD_TIMEOUT);
2599}
cf75ad8b 2600
d0b13706
LAD
2601/* Read BD Address */
2602static int hci_read_bd_addr_sync(struct hci_dev *hdev)
2603{
2604 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
2605 0, NULL, HCI_CMD_TIMEOUT);
2606}
cf75ad8b 2607
d0b13706
LAD
2608#define HCI_INIT(_func) \
2609{ \
2610 .func = _func, \
cf75ad8b
LAD
2611}
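
/* For example, HCI_INIT(hci_read_local_version_sync) expands to
 * { .func = hci_read_local_version_sync }; the empty {} entry at the end of
 * each table is the NULL terminator that hci_init_stage_sync() stops on.
 */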
2612
d0b13706
LAD
2613static const struct hci_init_stage hci_init0[] = {
2614 /* HCI_OP_READ_LOCAL_VERSION */
2615 HCI_INIT(hci_read_local_version_sync),
2616 /* HCI_OP_READ_BD_ADDR */
2617 HCI_INIT(hci_read_bd_addr_sync),
2618 {}
2619};
2620
2621int hci_reset_sync(struct hci_dev *hdev)
cf75ad8b 2622{
cf75ad8b
LAD
2623 int err;
2624
d0b13706 2625 set_bit(HCI_RESET, &hdev->flags);
cf75ad8b 2626
d0b13706
LAD
2627 err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
2628 HCI_CMD_TIMEOUT);
2629 if (err)
2630 return err;
cf75ad8b 2631
d0b13706
LAD
2632 return 0;
2633}
cf75ad8b 2634
d0b13706
LAD
2635static int hci_init0_sync(struct hci_dev *hdev)
2636{
2637 int err;
cf75ad8b 2638
d0b13706
LAD
2639 bt_dev_dbg(hdev, "");
2640
2641 /* Reset */
2642 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2643 err = hci_reset_sync(hdev);
cf75ad8b
LAD
2644 if (err)
2645 return err;
2646 }
2647
d0b13706
LAD
2648 return hci_init_stage_sync(hdev, hci_init0);
2649}
abfeea47 2650
d0b13706
LAD
2651static int hci_unconf_init_sync(struct hci_dev *hdev)
2652{
2653 int err;
2654
2655 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
cf75ad8b
LAD
2656 return 0;
2657
d0b13706
LAD
2658 err = hci_init0_sync(hdev);
2659 if (err < 0)
2660 return err;
cf75ad8b 2661
d0b13706
LAD
2662 if (hci_dev_test_flag(hdev, HCI_SETUP))
2663 hci_debugfs_create_basic(hdev);
cf75ad8b
LAD
2664
2665 return 0;
2666}
2667
d0b13706
LAD
2668/* Read Local Supported Features. */
2669static int hci_read_local_features_sync(struct hci_dev *hdev)
cf75ad8b 2670{
d0b13706
LAD
2671 /* Not all AMP controllers support this command */
2672 if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
2673 return 0;
cf75ad8b 2674
d0b13706
LAD
2675 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
2676 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2677}
2678
d0b13706
LAD
2679/* BR Controller init stage 1 command sequence */
2680static const struct hci_init_stage br_init1[] = {
2681 /* HCI_OP_READ_LOCAL_FEATURES */
2682 HCI_INIT(hci_read_local_features_sync),
2683 /* HCI_OP_READ_LOCAL_VERSION */
2684 HCI_INIT(hci_read_local_version_sync),
2685 /* HCI_OP_READ_BD_ADDR */
2686 HCI_INIT(hci_read_bd_addr_sync),
2687 {}
2688};
2689
2690/* Read Local Commands */
2691static int hci_read_local_cmds_sync(struct hci_dev *hdev)
cf75ad8b 2692{
d0b13706
LAD
2693 /* All Bluetooth 1.2 and later controllers should support the
2694 * HCI command for reading the local supported commands.
2695 *
2696 * Unfortunately some controllers indicate Bluetooth 1.2 support,
2697 * but do not have support for this command. If that is the case,
2698 * the driver can quirk the behavior and skip reading the local
2699 * supported commands.
2700 */
2701 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
2702 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
2703 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
2704 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b 2705
d0b13706 2706 return 0;
cf75ad8b
LAD
2707}
2708
d0b13706
LAD
2709/* Read Local AMP Info */
2710static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
cf75ad8b 2711{
d0b13706
LAD
2712 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
2713 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2714}
2715
d0b13706
LAD
2716/* Read Data Blk size */
2717static int hci_read_data_block_size_sync(struct hci_dev *hdev)
cf75ad8b 2718{
d0b13706
LAD
2719 return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
2720 0, NULL, HCI_CMD_TIMEOUT);
2721}
cf75ad8b 2722
d0b13706
LAD
2723/* Read Flow Control Mode */
2724static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
2725{
2726 return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
2727 0, NULL, HCI_CMD_TIMEOUT);
2728}
cf75ad8b 2729
d0b13706
LAD
2730/* Read Location Data */
2731static int hci_read_location_data_sync(struct hci_dev *hdev)
2732{
2733 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
2734 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2735}
2736
d0b13706
LAD
2737/* AMP Controller init stage 1 command sequence */
2738static const struct hci_init_stage amp_init1[] = {
2739 /* HCI_OP_READ_LOCAL_VERSION */
2740 HCI_INIT(hci_read_local_version_sync),
2741 /* HCI_OP_READ_LOCAL_COMMANDS */
2742 HCI_INIT(hci_read_local_cmds_sync),
2743 /* HCI_OP_READ_LOCAL_AMP_INFO */
2744 HCI_INIT(hci_read_local_amp_info_sync),
2745 /* HCI_OP_READ_DATA_BLOCK_SIZE */
2746 HCI_INIT(hci_read_data_block_size_sync),
2747 /* HCI_OP_READ_FLOW_CONTROL_MODE */
2748 HCI_INIT(hci_read_flow_control_mode_sync),
2749 /* HCI_OP_READ_LOCATION_DATA */
2750 HCI_INIT(hci_read_location_data_sync),
	{}
2751};
2752
2753static int hci_init1_sync(struct hci_dev *hdev)
cf75ad8b 2754{
d0b13706 2755 int err;
cf75ad8b 2756
d0b13706 2757 bt_dev_dbg(hdev, "");
cf75ad8b 2758
d0b13706
LAD
2759 /* Reset */
2760 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2761 err = hci_reset_sync(hdev);
2762 if (err)
2763 return err;
2764 }
cf75ad8b 2765
d0b13706
LAD
2766 switch (hdev->dev_type) {
2767 case HCI_PRIMARY:
2768 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2769 return hci_init_stage_sync(hdev, br_init1);
2770 case HCI_AMP:
2771 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2772 return hci_init_stage_sync(hdev, amp_init1);
2773 default:
2774 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
2775 break;
2776 }
2777
2778 return 0;
cf75ad8b
LAD
2779}
2780
d0b13706
LAD
2781/* AMP Controller init stage 2 command sequence */
2782static const struct hci_init_stage amp_init2[] = {
2783 /* HCI_OP_READ_LOCAL_FEATURES */
2784 HCI_INIT(hci_read_local_features_sync),
	{}
2785};
2786
2787/* Read Buffer Size (ACL mtu, max pkt, etc.) */
2788static int hci_read_buffer_size_sync(struct hci_dev *hdev)
cf75ad8b 2789{
d0b13706
LAD
2790 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
2791 0, NULL, HCI_CMD_TIMEOUT);
2792}
cf75ad8b 2793
d0b13706
LAD
2794/* Read Class of Device */
2795static int hci_read_dev_class_sync(struct hci_dev *hdev)
2796{
2797 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
2798 0, NULL, HCI_CMD_TIMEOUT);
2799}
cf75ad8b 2800
d0b13706
LAD
2801/* Read Local Name */
2802static int hci_read_local_name_sync(struct hci_dev *hdev)
2803{
2804 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
2805 0, NULL, HCI_CMD_TIMEOUT);
2806}
cf75ad8b 2807
d0b13706
LAD
2808/* Read Voice Setting */
2809static int hci_read_voice_setting_sync(struct hci_dev *hdev)
2810{
2811 return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
2812 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2813}
2814
d0b13706
LAD
2815/* Read Number of Supported IAC */
2816static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
cf75ad8b 2817{
d0b13706
LAD
2818 return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
2819 0, NULL, HCI_CMD_TIMEOUT);
2820}
cf75ad8b 2821
d0b13706
LAD
2822/* Read Current IAC LAP */
2823static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
2824{
2825 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
2826 0, NULL, HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2827}
2828
d0b13706
LAD
2829static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
2830 u8 cond_type, bdaddr_t *bdaddr,
2831 u8 auto_accept)
cf75ad8b 2832{
d0b13706 2833 struct hci_cp_set_event_filter cp;
cf75ad8b 2834
d0b13706 2835 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
cf75ad8b
LAD
2836 return 0;
2837
0eaecfb2
IFM
2838 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
2839 return 0;
2840
d0b13706
LAD
2841 memset(&cp, 0, sizeof(cp));
2842 cp.flt_type = flt_type;
cf75ad8b 2843
d0b13706
LAD
2844 if (flt_type != HCI_FLT_CLEAR_ALL) {
2845 cp.cond_type = cond_type;
2846 bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
2847 cp.addr_conn_flt.auto_accept = auto_accept;
cf75ad8b
LAD
2848 }
2849
d0b13706
LAD
2850 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
2851 flt_type == HCI_FLT_CLEAR_ALL ?
2852 sizeof(cp.flt_type) : sizeof(cp), &cp,
2853 HCI_CMD_TIMEOUT);
cf75ad8b
LAD
2854}
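
/* Illustrative sketch (hypothetical caller, not part of the file): program a
 * Connection Setup filter that auto-accepts connections from a single peer,
 * using the existing HCI_FLT_CONN_SETUP constants from hci.h.
 */
static int example_auto_accept_peer(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	return hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
					 HCI_CONN_SETUP_ALLOW_BDADDR,
					 bdaddr, HCI_CONN_SETUP_AUTO_ON);
}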
2855
d0b13706 2856static int hci_clear_event_filter_sync(struct hci_dev *hdev)
cf75ad8b 2857{
d0b13706
LAD
2858 if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
2859 return 0;
2860
0eaecfb2
IFM
2861 /* In theory the state machine should not reach here unless
2862 * a hci_set_event_filter_sync() call succeeds, but we do
2863 * the check both for parity and as a future reminder.
2864 */
2865 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
2866 return 0;
2867
d0b13706
LAD
2868 return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
2869 BDADDR_ANY, 0x00);
2870}
2871
2872/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms = 20 s) */
2873static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
2874{
2875 __le16 param = cpu_to_le16(0x7d00);
2876
2877 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
2878 sizeof(param), &param, HCI_CMD_TIMEOUT);
2879}
2880
2881/* BR Controller init stage 2 command sequence */
2882static const struct hci_init_stage br_init2[] = {
2883 /* HCI_OP_READ_BUFFER_SIZE */
2884 HCI_INIT(hci_read_buffer_size_sync),
2885 /* HCI_OP_READ_CLASS_OF_DEV */
2886 HCI_INIT(hci_read_dev_class_sync),
2887 /* HCI_OP_READ_LOCAL_NAME */
2888 HCI_INIT(hci_read_local_name_sync),
2889 /* HCI_OP_READ_VOICE_SETTING */
2890 HCI_INIT(hci_read_voice_setting_sync),
2891 /* HCI_OP_READ_NUM_SUPPORTED_IAC */
2892 HCI_INIT(hci_read_num_supported_iac_sync),
2893 /* HCI_OP_READ_CURRENT_IAC_LAP */
2894 HCI_INIT(hci_read_current_iac_lap_sync),
2895 /* HCI_OP_SET_EVENT_FLT */
2896 HCI_INIT(hci_clear_event_filter_sync),
2897 /* HCI_OP_WRITE_CA_TIMEOUT */
2898 HCI_INIT(hci_write_ca_timeout_sync),
2899 {}
2900};
2901
2902static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
2903{
2904 u8 mode = 0x01;
2905
2906 if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2907 return 0;
2908
2909 /* When SSP is available, then the host features page
2910	 * should be available as well. However some
2911 * controllers list the max_page as 0 as long as SSP
2912 * has not been enabled. To achieve proper debugging
2913 * output, force the minimum max_page to 1 at least.
2914 */
2915 hdev->max_page = 0x01;
2916
2917 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
2918 sizeof(mode), &mode, HCI_CMD_TIMEOUT);
2919}
2920
2921static int hci_write_eir_sync(struct hci_dev *hdev)
2922{
2923 struct hci_cp_write_eir cp;
2924
2925 if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2926 return 0;
2927
2928 memset(hdev->eir, 0, sizeof(hdev->eir));
2929 memset(&cp, 0, sizeof(cp));
2930
2931 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
2932 HCI_CMD_TIMEOUT);
2933}
2934
2935static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
2936{
2937 u8 mode;
2938
2939 if (!lmp_inq_rssi_capable(hdev) &&
2940 !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
2941 return 0;
2942
2943 /* If Extended Inquiry Result events are supported, then
2944 * they are clearly preferred over Inquiry Result with RSSI
2945 * events.
2946 */
2947 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
2948
2949 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
2950 sizeof(mode), &mode, HCI_CMD_TIMEOUT);
2951}
2952
2953static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
2954{
2955 if (!lmp_inq_tx_pwr_capable(hdev))
2956 return 0;
2957
2958 return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
2959 0, NULL, HCI_CMD_TIMEOUT);
2960}
2961
2962static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
2963{
2964 struct hci_cp_read_local_ext_features cp;
2965
2966 if (!lmp_ext_feat_capable(hdev))
2967 return 0;
2968
2969 memset(&cp, 0, sizeof(cp));
2970 cp.page = page;
2971
2972 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
2973 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2974}
2975
2976static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
2977{
2978 return hci_read_local_ext_features_sync(hdev, 0x01);
2979}
2980
2981/* HCI Controller init stage 2 command sequence */
2982static const struct hci_init_stage hci_init2[] = {
2983 /* HCI_OP_READ_LOCAL_COMMANDS */
2984 HCI_INIT(hci_read_local_cmds_sync),
2985 /* HCI_OP_WRITE_SSP_MODE */
2986 HCI_INIT(hci_write_ssp_mode_1_sync),
2987 /* HCI_OP_WRITE_EIR */
2988 HCI_INIT(hci_write_eir_sync),
2989 /* HCI_OP_WRITE_INQUIRY_MODE */
2990 HCI_INIT(hci_write_inquiry_mode_sync),
2991 /* HCI_OP_READ_INQ_RSP_TX_POWER */
2992 HCI_INIT(hci_read_inq_rsp_tx_power_sync),
2993 /* HCI_OP_READ_LOCAL_EXT_FEATURES */
2994 HCI_INIT(hci_read_local_ext_features_1_sync),
2995 /* HCI_OP_WRITE_AUTH_ENABLE */
2996 HCI_INIT(hci_write_auth_enable_sync),
2997 {}
2998};
2999
3000/* Read LE Buffer Size */
3001static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3002{
3003 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3004 0, NULL, HCI_CMD_TIMEOUT);
3005}
3006
3007/* Read LE Local Supported Features */
3008static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3009{
3010 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3011 0, NULL, HCI_CMD_TIMEOUT);
3012}
3013
3014/* Read LE Supported States */
3015static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3016{
3017 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3018 0, NULL, HCI_CMD_TIMEOUT);
3019}
3020
3021/* LE Controller init stage 2 command sequence */
3022static const struct hci_init_stage le_init2[] = {
3023 /* HCI_OP_LE_READ_BUFFER_SIZE */
3024 HCI_INIT(hci_le_read_buffer_size_sync),
3025 /* HCI_OP_LE_READ_LOCAL_FEATURES */
3026 HCI_INIT(hci_le_read_local_features_sync),
3027 /* HCI_OP_LE_READ_SUPPORTED_STATES */
3028 HCI_INIT(hci_le_read_supported_states_sync),
3029 {}
3030};
3031
3032static int hci_init2_sync(struct hci_dev *hdev)
3033{
3034 int err;
3035
3036 bt_dev_dbg(hdev, "");
3037
3038 if (hdev->dev_type == HCI_AMP)
3039 return hci_init_stage_sync(hdev, amp_init2);
3040
3041 if (lmp_bredr_capable(hdev)) {
3042 err = hci_init_stage_sync(hdev, br_init2);
3043 if (err)
3044 return err;
3045 } else {
3046 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3047 }
3048
3049 if (lmp_le_capable(hdev)) {
3050 err = hci_init_stage_sync(hdev, le_init2);
3051 if (err)
3052 return err;
3053 /* LE-only controllers have LE implicitly enabled */
3054 if (!lmp_bredr_capable(hdev))
3055 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3056 }
3057
3058 return hci_init_stage_sync(hdev, hci_init2);
3059}
3060
3061static int hci_set_event_mask_sync(struct hci_dev *hdev)
3062{
3063 /* The second byte is 0xff instead of 0x9f (two reserved bits
3064 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3065 * command otherwise.
3066 */
3067 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3068
3069	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
3070 * any event mask for pre 1.2 devices.
3071 */
3072 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3073 return 0;
3074
3075 if (lmp_bredr_capable(hdev)) {
3076 events[4] |= 0x01; /* Flow Specification Complete */
182ee45d
LAD
3077
3078 /* Don't set Disconnect Complete when suspended as that
3079 * would wakeup the host when disconnecting due to
3080 * suspend.
3081 */
3082 if (hdev->suspended)
3083 events[0] &= 0xef;
d0b13706
LAD
3084 } else {
3085 /* Use a different default for LE-only devices */
3086 memset(events, 0, sizeof(events));
3087 events[1] |= 0x20; /* Command Complete */
3088 events[1] |= 0x40; /* Command Status */
3089 events[1] |= 0x80; /* Hardware Error */
3090
3091 /* If the controller supports the Disconnect command, enable
3092 * the corresponding event. In addition enable packet flow
3093 * control related events.
3094 */
3095 if (hdev->commands[0] & 0x20) {
182ee45d
LAD
3096 /* Don't set Disconnect Complete when suspended as that
3097 * would wakeup the host when disconnecting due to
3098 * suspend.
3099 */
3100 if (!hdev->suspended)
3101 events[0] |= 0x10; /* Disconnection Complete */
d0b13706
LAD
3102 events[2] |= 0x04; /* Number of Completed Packets */
3103 events[3] |= 0x02; /* Data Buffer Overflow */
3104 }
3105
3106 /* If the controller supports the Read Remote Version
3107 * Information command, enable the corresponding event.
3108 */
3109 if (hdev->commands[2] & 0x80)
3110 events[1] |= 0x08; /* Read Remote Version Information
3111 * Complete
3112 */
3113
3114 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
3115 events[0] |= 0x80; /* Encryption Change */
3116 events[5] |= 0x80; /* Encryption Key Refresh Complete */
3117 }
3118 }
3119
3120 if (lmp_inq_rssi_capable(hdev) ||
3121 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3122 events[4] |= 0x02; /* Inquiry Result with RSSI */
3123
3124 if (lmp_ext_feat_capable(hdev))
3125 events[4] |= 0x04; /* Read Remote Extended Features Complete */
3126
3127 if (lmp_esco_capable(hdev)) {
3128 events[5] |= 0x08; /* Synchronous Connection Complete */
3129 events[5] |= 0x10; /* Synchronous Connection Changed */
3130 }
3131
3132 if (lmp_sniffsubr_capable(hdev))
3133 events[5] |= 0x20; /* Sniff Subrating */
3134
3135 if (lmp_pause_enc_capable(hdev))
3136 events[5] |= 0x80; /* Encryption Key Refresh Complete */
3137
3138 if (lmp_ext_inq_capable(hdev))
3139 events[5] |= 0x40; /* Extended Inquiry Result */
3140
3141 if (lmp_no_flush_capable(hdev))
3142 events[7] |= 0x01; /* Enhanced Flush Complete */
3143
3144 if (lmp_lsto_capable(hdev))
3145 events[6] |= 0x80; /* Link Supervision Timeout Changed */
3146
3147 if (lmp_ssp_capable(hdev)) {
3148 events[6] |= 0x01; /* IO Capability Request */
3149 events[6] |= 0x02; /* IO Capability Response */
3150 events[6] |= 0x04; /* User Confirmation Request */
3151 events[6] |= 0x08; /* User Passkey Request */
3152 events[6] |= 0x10; /* Remote OOB Data Request */
3153 events[6] |= 0x20; /* Simple Pairing Complete */
3154 events[7] |= 0x04; /* User Passkey Notification */
3155 events[7] |= 0x08; /* Keypress Notification */
3156 events[7] |= 0x10; /* Remote Host Supported
3157 * Features Notification
3158 */
3159 }
3160
3161 if (lmp_le_capable(hdev))
3162 events[7] |= 0x20; /* LE Meta-Event */
3163
3164 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
3165 sizeof(events), events, HCI_CMD_TIMEOUT);
3166}
3167
3168static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
3169{
3170 struct hci_cp_read_stored_link_key cp;
3171
3172 if (!(hdev->commands[6] & 0x20) ||
3173 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
3174 return 0;
3175
3176 memset(&cp, 0, sizeof(cp));
3177 bacpy(&cp.bdaddr, BDADDR_ANY);
3178 cp.read_all = 0x01;
3179
3180 return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
3181 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3182}
3183
3184static int hci_setup_link_policy_sync(struct hci_dev *hdev)
3185{
3186 struct hci_cp_write_def_link_policy cp;
3187 u16 link_policy = 0;
3188
3189 if (!(hdev->commands[5] & 0x10))
3190 return 0;
3191
3192 memset(&cp, 0, sizeof(cp));
3193
3194 if (lmp_rswitch_capable(hdev))
3195 link_policy |= HCI_LP_RSWITCH;
3196 if (lmp_hold_capable(hdev))
3197 link_policy |= HCI_LP_HOLD;
3198 if (lmp_sniff_capable(hdev))
3199 link_policy |= HCI_LP_SNIFF;
3200 if (lmp_park_capable(hdev))
3201 link_policy |= HCI_LP_PARK;
3202
3203 cp.policy = cpu_to_le16(link_policy);
3204
3205 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
3206 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3207}
3208
3209static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
3210{
3211 if (!(hdev->commands[8] & 0x01))
3212 return 0;
3213
3214 return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
3215 0, NULL, HCI_CMD_TIMEOUT);
3216}
3217
3218static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
3219{
3220 if (!(hdev->commands[18] & 0x04) ||
3221 test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
3222 return 0;
3223
3224 return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
3225 0, NULL, HCI_CMD_TIMEOUT);
3226}
3227
3228static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
3229{
3230 /* Some older Broadcom based Bluetooth 1.2 controllers do not
3231 * support the Read Page Scan Type command. Check support for
3232 * this command in the bit mask of supported commands.
3233 */
3234 if (!(hdev->commands[13] & 0x01))
3235 return 0;
3236
3237 return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
3238 0, NULL, HCI_CMD_TIMEOUT);
3239}
3240
3241/* Read features beyond page 1 if available */
3242static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
3243{
3244 u8 page;
3245 int err;
3246
3247 if (!lmp_ext_feat_capable(hdev))
3248 return 0;
3249
3250 for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
3251 page++) {
3252 err = hci_read_local_ext_features_sync(hdev, page);
3253 if (err)
3254 return err;
3255 }
3256
3257 return 0;
3258}
3259
3260/* HCI Controller init stage 3 command sequence */
3261static const struct hci_init_stage hci_init3[] = {
3262 /* HCI_OP_SET_EVENT_MASK */
3263 HCI_INIT(hci_set_event_mask_sync),
3264 /* HCI_OP_READ_STORED_LINK_KEY */
3265 HCI_INIT(hci_read_stored_link_key_sync),
3266 /* HCI_OP_WRITE_DEF_LINK_POLICY */
3267 HCI_INIT(hci_setup_link_policy_sync),
3268 /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
3269 HCI_INIT(hci_read_page_scan_activity_sync),
3270 /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
3271 HCI_INIT(hci_read_def_err_data_reporting_sync),
3272 /* HCI_OP_READ_PAGE_SCAN_TYPE */
3273 HCI_INIT(hci_read_page_scan_type_sync),
3274 /* HCI_OP_READ_LOCAL_EXT_FEATURES */
3275 HCI_INIT(hci_read_local_ext_features_all_sync),
3276 {}
3277};
3278
3279static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
3280{
3281 u8 events[8];
3282
3283 if (!lmp_le_capable(hdev))
3284 return 0;
3285
3286 memset(events, 0, sizeof(events));
3287
3288 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
3289 events[0] |= 0x10; /* LE Long Term Key Request */
3290
3291 /* If controller supports the Connection Parameters Request
3292 * Link Layer Procedure, enable the corresponding event.
3293 */
3294 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
3295 /* LE Remote Connection Parameter Request */
3296 events[0] |= 0x20;
3297
3298 /* If the controller supports the Data Length Extension
3299 * feature, enable the corresponding event.
3300 */
3301 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
3302 events[0] |= 0x40; /* LE Data Length Change */
3303
a56a1138
LAD
3304 /* If the controller supports LL Privacy feature or LE Extended Adv,
3305 * enable the corresponding event.
d0b13706 3306 */
a56a1138 3307 if (use_enhanced_conn_complete(hdev))
d0b13706
LAD
3308 events[1] |= 0x02; /* LE Enhanced Connection Complete */
3309
3310 /* If the controller supports Extended Scanner Filter
3311 * Policies, enable the corresponding event.
3312 */
3313 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
3314 events[1] |= 0x04; /* LE Direct Advertising Report */
3315
3316 /* If the controller supports Channel Selection Algorithm #2
3317 * feature, enable the corresponding event.
3318 */
3319 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
3320 events[2] |= 0x08; /* LE Channel Selection Algorithm */
3321
3322 /* If the controller supports the LE Set Scan Enable command,
3323 * enable the corresponding advertising report event.
3324 */
3325 if (hdev->commands[26] & 0x08)
3326 events[0] |= 0x02; /* LE Advertising Report */
3327
3328 /* If the controller supports the LE Create Connection
3329 * command, enable the corresponding event.
3330 */
3331 if (hdev->commands[26] & 0x10)
3332 events[0] |= 0x01; /* LE Connection Complete */
3333
3334 /* If the controller supports the LE Connection Update
3335 * command, enable the corresponding event.
3336 */
3337 if (hdev->commands[27] & 0x04)
3338 events[0] |= 0x04; /* LE Connection Update Complete */
3339
3340 /* If the controller supports the LE Read Remote Used Features
3341 * command, enable the corresponding event.
3342 */
3343 if (hdev->commands[27] & 0x20)
3344 /* LE Read Remote Used Features Complete */
3345 events[0] |= 0x08;
3346
3347 /* If the controller supports the LE Read Local P-256
3348 * Public Key command, enable the corresponding event.
3349 */
3350 if (hdev->commands[34] & 0x02)
3351 /* LE Read Local P-256 Public Key Complete */
3352 events[0] |= 0x80;
3353
3354 /* If the controller supports the LE Generate DHKey
3355 * command, enable the corresponding event.
3356 */
3357 if (hdev->commands[34] & 0x04)
3358 events[1] |= 0x01; /* LE Generate DHKey Complete */
3359
3360 /* If the controller supports the LE Set Default PHY or
3361 * LE Set PHY commands, enable the corresponding event.
3362 */
3363 if (hdev->commands[35] & (0x20 | 0x40))
3364 events[1] |= 0x08; /* LE PHY Update Complete */
3365
3366 /* If the controller supports LE Set Extended Scan Parameters
3367 * and LE Set Extended Scan Enable commands, enable the
3368 * corresponding event.
3369 */
3370 if (use_ext_scan(hdev))
3371 events[1] |= 0x10; /* LE Extended Advertising Report */
3372
3373 /* If the controller supports the LE Extended Advertising
3374 * command, enable the corresponding event.
3375 */
3376 if (ext_adv_capable(hdev))
3377 events[2] |= 0x02; /* LE Advertising Set Terminated */
3378
3379 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
3380 sizeof(events), events, HCI_CMD_TIMEOUT);
3381}
3382
3383/* Read LE Advertising Channel TX Power */
3384static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
3385{
3386 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
3387 /* HCI TS spec forbids mixing of legacy and extended
3388 * advertising commands wherein READ_ADV_TX_POWER is
3389 * also included. So do not call it if extended adv
3390 * is supported otherwise controller will return
3391 * COMMAND_DISALLOWED for extended commands.
3392 */
3393 return __hci_cmd_sync_status(hdev,
3394 HCI_OP_LE_READ_ADV_TX_POWER,
3395 0, NULL, HCI_CMD_TIMEOUT);
3396 }
3397
3398 return 0;
3399}
3400
3401/* Read LE Min/Max Tx Power */
3402static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
3403{
d2f8114f
AG
3404 if (!(hdev->commands[38] & 0x80) ||
3405 test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
d0b13706
LAD
3406 return 0;
3407
3408 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
3409 0, NULL, HCI_CMD_TIMEOUT);
3410}
3411
3412/* Read LE Accept List Size */
3413static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
3414{
3415 if (!(hdev->commands[26] & 0x40))
3416 return 0;
3417
3418 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
3419 0, NULL, HCI_CMD_TIMEOUT);
3420}
3421
3422/* Clear LE Accept List */
3423static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
3424{
3425 if (!(hdev->commands[26] & 0x80))
3426 return 0;
3427
3428 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
3429 HCI_CMD_TIMEOUT);
3430}
3431
3432/* Read LE Resolving List Size */
3433static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
3434{
3435 if (!(hdev->commands[34] & 0x40))
3436 return 0;
3437
3438 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
3439 0, NULL, HCI_CMD_TIMEOUT);
3440}
3441
3442/* Clear LE Resolving List */
3443static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
3444{
3445 if (!(hdev->commands[34] & 0x20))
3446 return 0;
3447
3448 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
3449 HCI_CMD_TIMEOUT);
3450}
3451
3452/* Set RPA timeout */
3453static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
3454{
3455 __le16 timeout = cpu_to_le16(hdev->rpa_timeout);
3456
3457 if (!(hdev->commands[35] & 0x04))
3458 return 0;
3459
3460 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
3461 sizeof(timeout), &timeout,
3462 HCI_CMD_TIMEOUT);
3463}
3464
3465/* Read LE Maximum Data Length */
3466static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
3467{
3468 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
3469 return 0;
3470
3471 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
3472 HCI_CMD_TIMEOUT);
3473}
3474
3475/* Read LE Suggested Default Data Length */
3476static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
3477{
3478 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
3479 return 0;
3480
3481 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
3482 HCI_CMD_TIMEOUT);
3483}
3484
3485/* Read LE Number of Supported Advertising Sets */
3486static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
3487{
3488 if (!ext_adv_capable(hdev))
3489 return 0;
3490
3491 return __hci_cmd_sync_status(hdev,
3492 HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
3493 0, NULL, HCI_CMD_TIMEOUT);
3494}
3495
3496/* Write LE Host Supported */
3497static int hci_set_le_support_sync(struct hci_dev *hdev)
3498{
3499 struct hci_cp_write_le_host_supported cp;
3500
3501 /* LE-only devices do not support explicit enablement */
3502 if (!lmp_bredr_capable(hdev))
3503 return 0;
3504
3505 memset(&cp, 0, sizeof(cp));
3506
3507 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3508 cp.le = 0x01;
3509 cp.simul = 0x00;
3510 }
3511
3512 if (cp.le == lmp_host_le_capable(hdev))
3513 return 0;
3514
3515 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3516 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3517}
3518
3519/* LE Controller init stage 3 command sequence */
3520static const struct hci_init_stage le_init3[] = {
3521 /* HCI_OP_LE_SET_EVENT_MASK */
3522 HCI_INIT(hci_le_set_event_mask_sync),
3523 /* HCI_OP_LE_READ_ADV_TX_POWER */
3524 HCI_INIT(hci_le_read_adv_tx_power_sync),
3525 /* HCI_OP_LE_READ_TRANSMIT_POWER */
3526 HCI_INIT(hci_le_read_tx_power_sync),
3527 /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
3528 HCI_INIT(hci_le_read_accept_list_size_sync),
3529 /* HCI_OP_LE_CLEAR_ACCEPT_LIST */
3530 HCI_INIT(hci_le_clear_accept_list_sync),
3531 /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
3532 HCI_INIT(hci_le_read_resolv_list_size_sync),
3533 /* HCI_OP_LE_CLEAR_RESOLV_LIST */
3534 HCI_INIT(hci_le_clear_resolv_list_sync),
3535 /* HCI_OP_LE_SET_RPA_TIMEOUT */
3536 HCI_INIT(hci_le_set_rpa_timeout_sync),
3537 /* HCI_OP_LE_READ_MAX_DATA_LEN */
3538 HCI_INIT(hci_le_read_max_data_len_sync),
3539 /* HCI_OP_LE_READ_DEF_DATA_LEN */
3540 HCI_INIT(hci_le_read_def_data_len_sync),
3541 /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
3542 HCI_INIT(hci_le_read_num_support_adv_sets_sync),
3543 /* HCI_OP_WRITE_LE_HOST_SUPPORTED */
3544 HCI_INIT(hci_set_le_support_sync),
3545 {}
3546};
3547
3548static int hci_init3_sync(struct hci_dev *hdev)
3549{
3550 int err;
3551
3552 bt_dev_dbg(hdev, "");
3553
3554 err = hci_init_stage_sync(hdev, hci_init3);
3555 if (err)
3556 return err;
3557
3558 if (lmp_le_capable(hdev))
3559 return hci_init_stage_sync(hdev, le_init3);
3560
3561 return 0;
3562}
3563
3564static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
3565{
3566 struct hci_cp_delete_stored_link_key cp;
3567
3568 /* Some Broadcom based Bluetooth controllers do not support the
3569 * Delete Stored Link Key command. They are clearly indicating its
3570 * absence in the bit mask of supported commands.
3571 *
3572 * Check the supported commands and only if the command is marked
3573 * as supported send it. If not supported assume that the controller
3574 * does not have actual support for stored link keys which makes this
3575 * command redundant anyway.
3576 *
3577 * Some controllers indicate that they support handling deleting
3578 * stored link keys, but they don't. The quirk lets a driver
3579 * just disable this command.
3580 */
3581 if (!(hdev->commands[6] & 0x80) ||
3582 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
3583 return 0;
3584
3585 memset(&cp, 0, sizeof(cp));
3586 bacpy(&cp.bdaddr, BDADDR_ANY);
3587 cp.delete_all = 0x01;
3588
3589 return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
3590 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3591}
3592
3593static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
3594{
3595 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
3596 bool changed = false;
3597
3598 /* Set event mask page 2 if the HCI command for it is supported */
3599 if (!(hdev->commands[22] & 0x04))
3600 return 0;
3601
3602 /* If Connectionless Peripheral Broadcast central role is supported
3603 * enable all necessary events for it.
3604 */
3605 if (lmp_cpb_central_capable(hdev)) {
3606 events[1] |= 0x40; /* Triggered Clock Capture */
3607 events[1] |= 0x80; /* Synchronization Train Complete */
3608 events[2] |= 0x10; /* Peripheral Page Response Timeout */
3609 events[2] |= 0x20; /* CPB Channel Map Change */
3610 changed = true;
3611 }
3612
3613 /* If Connectionless Peripheral Broadcast peripheral role is supported
3614 * enable all necessary events for it.
3615 */
3616 if (lmp_cpb_peripheral_capable(hdev)) {
3617 events[2] |= 0x01; /* Synchronization Train Received */
3618 events[2] |= 0x02; /* CPB Receive */
3619 events[2] |= 0x04; /* CPB Timeout */
3620 events[2] |= 0x08; /* Truncated Page Complete */
3621 changed = true;
3622 }
3623
3624 /* Enable Authenticated Payload Timeout Expired event if supported */
3625 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
3626 events[2] |= 0x80;
3627 changed = true;
3628 }
3629
3630 /* Some Broadcom based controllers indicate support for Set Event
3631 * Mask Page 2 command, but then actually do not support it. Since
3632 * the default value is all bits set to zero, the command is only
3633 * required if the event mask has to be changed. In case no change
3634 * to the event mask is needed, skip this command.
3635 */
3636 if (!changed)
3637 return 0;
3638
3639 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
3640 sizeof(events), events, HCI_CMD_TIMEOUT);
3641}
3642
3643/* Read local codec list if the HCI command is supported */
3644static int hci_read_local_codecs_sync(struct hci_dev *hdev)
3645{
3646 if (!(hdev->commands[29] & 0x20))
3647 return 0;
3648
3649 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
3650 HCI_CMD_TIMEOUT);
3651}
3652
3653/* Read local pairing options if the HCI command is supported */
3654static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
3655{
3656 if (!(hdev->commands[41] & 0x08))
3657 return 0;
3658
3659 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
3660 0, NULL, HCI_CMD_TIMEOUT);
3661}
3662
3663/* Get MWS transport configuration if the HCI command is supported */
3664static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
3665{
3666 if (!(hdev->commands[30] & 0x08))
3667 return 0;
3668
3669 return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
3670 0, NULL, HCI_CMD_TIMEOUT);
3671}
3672
3673/* Check for Synchronization Train support */
3674static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
3675{
3676 if (!lmp_sync_train_capable(hdev))
3677 return 0;
3678
3679 return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
3680 0, NULL, HCI_CMD_TIMEOUT);
3681}
3682
3683/* Enable Secure Connections if supported and configured */
3684static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
3685{
3686 u8 support = 0x01;
3687
3688 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3689 !bredr_sc_enabled(hdev))
3690 return 0;
3691
3692 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3693 sizeof(support), &support,
3694 HCI_CMD_TIMEOUT);
3695}
3696
3697/* Set erroneous data reporting, if supported, to the wideband
3698 * speech setting value
3699 */
3700static int hci_set_err_data_report_sync(struct hci_dev *hdev)
3701{
3702 struct hci_cp_write_def_err_data_reporting cp;
3703 bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
3704
3705 if (!(hdev->commands[18] & 0x08) ||
3706 test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
3707 return 0;
3708
3709 if (enabled == hdev->err_data_reporting)
3710 return 0;
3711
3712 memset(&cp, 0, sizeof(cp));
3713 cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
3714 ERR_DATA_REPORTING_DISABLED;
3715
3716 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
3717 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3718}
3719
3720static const struct hci_init_stage hci_init4[] = {
3721 /* HCI_OP_DELETE_STORED_LINK_KEY */
3722 HCI_INIT(hci_delete_stored_link_key_sync),
3723 /* HCI_OP_SET_EVENT_MASK_PAGE_2 */
3724 HCI_INIT(hci_set_event_mask_page_2_sync),
3725 /* HCI_OP_READ_LOCAL_CODECS */
3726 HCI_INIT(hci_read_local_codecs_sync),
3727 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
3728 HCI_INIT(hci_read_local_pairing_opts_sync),
3729 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
3730 HCI_INIT(hci_get_mws_transport_config_sync),
3731 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
3732 HCI_INIT(hci_read_sync_train_params_sync),
3733 /* HCI_OP_WRITE_SC_SUPPORT */
3734 HCI_INIT(hci_write_sc_support_1_sync),
3735 /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
3736 HCI_INIT(hci_set_err_data_report_sync),
3737 {}
3738};
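
/* Editor's note: each table above is consumed by hci_init_stage_sync(),
 * defined earlier in this file. A minimal sketch of that runner,
 * assuming each entry carries a single func callback and the table is
 * terminated by an empty entry (not a verbatim copy):
 */
static int hci_init_stage_sync_sketch(struct hci_dev *hdev,
				      const struct hci_init_stage *stage)
{
	size_t i;

	for (i = 0; stage[i].func; i++) {
		int err = stage[i].func(hdev);

		if (err)
			return err;
	}

	return 0;
}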
3739
3740/* Set Suggested Default Data Length to maximum if supported */
3741static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
3742{
3743 struct hci_cp_le_write_def_data_len cp;
3744
3745 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
3746 return 0;
3747
3748 memset(&cp, 0, sizeof(cp));
3749 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
3750 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
3751
3752 return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
3753 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3754}
3755
3756/* Set Default PHY parameters if command is supported */
3757static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
3758{
3759 struct hci_cp_le_set_default_phy cp;
3760
3761 if (!(hdev->commands[35] & 0x20))
3762 return 0;
3763
3764 memset(&cp, 0, sizeof(cp));
3765 cp.all_phys = 0x00;
3766 cp.tx_phys = hdev->le_tx_def_phys;
3767 cp.rx_phys = hdev->le_rx_def_phys;
3768
3769 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3770 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3771}
3772
3773static const struct hci_init_stage le_init4[] = {
3774 /* HCI_OP_LE_WRITE_DEF_DATA_LEN */
3775 HCI_INIT(hci_le_set_write_def_data_len_sync),
3776 /* HCI_OP_LE_SET_DEFAULT_PHY */
3777 HCI_INIT(hci_le_set_default_phy_sync),
3778 {}
3779};
3780
3781static int hci_init4_sync(struct hci_dev *hdev)
3782{
3783 int err;
3784
3785 bt_dev_dbg(hdev, "");
3786
3787 err = hci_init_stage_sync(hdev, hci_init4);
3788 if (err)
3789 return err;
3790
3791 if (lmp_le_capable(hdev))
3792 return hci_init_stage_sync(hdev, le_init4);
3793
3794 return 0;
3795}
3796
3797static int hci_init_sync(struct hci_dev *hdev)
3798{
3799 int err;
3800
3801 err = hci_init1_sync(hdev);
3802 if (err < 0)
3803 return err;
3804
3805 if (hci_dev_test_flag(hdev, HCI_SETUP))
3806 hci_debugfs_create_basic(hdev);
3807
3808 err = hci_init2_sync(hdev);
3809 if (err < 0)
3810 return err;
3811
3812	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
3813	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
3814 * first two stages of init.
3815 */
3816 if (hdev->dev_type != HCI_PRIMARY)
3817 return 0;
3818
3819 err = hci_init3_sync(hdev);
3820 if (err < 0)
3821 return err;
3822
3823 err = hci_init4_sync(hdev);
3824 if (err < 0)
3825 return err;
3826
3827 /* This function is only called when the controller is actually in
3828 * configured state. When the controller is marked as unconfigured,
3829 * this initialization procedure is not run.
3830 *
3831 * It means that it is possible that a controller runs through its
3832 * setup phase and then discovers missing settings. If that is the
3833 * case, then this function will not be called. It then will only
3834 * be called during the config phase.
3835 *
3836 * So only when in setup phase or config phase, create the debugfs
3837 * entries and register the SMP channels.
3838 */
3839 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
3840 !hci_dev_test_flag(hdev, HCI_CONFIG))
3841 return 0;
3842
3843 hci_debugfs_create_common(hdev);
3844
3845 if (lmp_bredr_capable(hdev))
3846 hci_debugfs_create_bredr(hdev);
3847
3848 if (lmp_le_capable(hdev))
3849 hci_debugfs_create_le(hdev);
3850
3851 return 0;
3852}
3853
3854#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
3855
3856static const struct {
3857 unsigned long quirk;
3858 const char *desc;
3859} hci_broken_table[] = {
3860 HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
3861 "HCI Read Local Supported Commands not supported"),
3862 HCI_QUIRK_BROKEN(STORED_LINK_KEY,
3863 "HCI Delete Stored Link Key command is advertised, "
3864 "but not supported."),
3865 HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
3866 "HCI Read Default Erroneous Data Reporting command is "
3867 "advertised, but not supported."),
3868 HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
3869 "HCI Read Transmit Power Level command is advertised, "
3870 "but not supported."),
3871 HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
3872 "HCI Set Event Filter command not supported."),
3873 HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
3874 "HCI Enhanced Setup Synchronous Connection command is "
3875 "advertised, but not supported.")
3876};
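
/* Editor's note: transport drivers opt into these warnings by setting
 * the corresponding HCI_QUIRK_BROKEN_* bit before registering the HCI
 * device or from their setup() callback. A hedged sketch with a
 * hypothetical driver callback:
 */
static int example_driver_setup(struct hci_dev *hdev)
{
	/* Declare that Delete Stored Link Key is advertised but broken,
	 * so hci_dev_open_sync() below logs the matching warning and the
	 * command is expected to be skipped during init.
	 */
	set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
	return 0;
}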
3877
3878int hci_dev_open_sync(struct hci_dev *hdev)
3879{
3880 int ret = 0;
3881
3882 bt_dev_dbg(hdev, "");
3883
3884 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
3885 ret = -ENODEV;
3886 goto done;
3887 }
3888
3889 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
3890 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3891 /* Check for rfkill but allow the HCI setup stage to
3892 * proceed (which in itself doesn't cause any RF activity).
3893 */
3894 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
3895 ret = -ERFKILL;
3896 goto done;
3897 }
3898
3899 /* Check for valid public address or a configured static
3900 * random address, but let the HCI setup proceed to
3901 * be able to determine if there is a public address
3902 * or not.
3903 *
3904 * In case of user channel usage, it is not important
3905 * if a public address or static random address is
3906 * available.
3907 *
3908 * This check is only valid for BR/EDR controllers
3909 * since AMP controllers do not have an address.
3910 */
3911 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
3912 hdev->dev_type == HCI_PRIMARY &&
3913 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3914 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
3915 ret = -EADDRNOTAVAIL;
3916 goto done;
3917 }
3918 }
3919
3920 if (test_bit(HCI_UP, &hdev->flags)) {
3921 ret = -EALREADY;
3922 goto done;
3923 }
3924
3925 if (hdev->open(hdev)) {
3926 ret = -EIO;
3927 goto done;
3928 }
3929
3930 set_bit(HCI_RUNNING, &hdev->flags);
3931 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
3932
3933 atomic_set(&hdev->cmd_cnt, 1);
3934 set_bit(HCI_INIT, &hdev->flags);
3935
3936 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
3937 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
3938 bool invalid_bdaddr;
3939		size_t i;
3940
3941 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
3942
3943 if (hdev->setup)
3944 ret = hdev->setup(hdev);
3945
3946 for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
3947 if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
3948 bt_dev_warn(hdev, "%s",
3949 hci_broken_table[i].desc);
3950 }
3951
3952 /* The transport driver can set the quirk to mark the
3953 * BD_ADDR invalid before creating the HCI device or in
3954 * its setup callback.
3955 */
3956 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
3957 &hdev->quirks);
3958
3959 if (ret)
3960 goto setup_failed;
3961
3962 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
3963 if (!bacmp(&hdev->public_addr, BDADDR_ANY))
3964 hci_dev_get_bd_addr_from_property(hdev);
3965
3966 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
3967 hdev->set_bdaddr) {
3968 ret = hdev->set_bdaddr(hdev,
3969 &hdev->public_addr);
3970
3971 /* If setting of the BD_ADDR from the device
3972 * property succeeds, then treat the address
3973 * as valid even if the invalid BD_ADDR
3974 * quirk indicates otherwise.
3975 */
3976 if (!ret)
3977 invalid_bdaddr = false;
3978 }
3979 }
3980
3981setup_failed:
3982 /* The transport driver can set these quirks before
3983 * creating the HCI device or in its setup callback.
3984 *
3985 * For the invalid BD_ADDR quirk it is possible that
3986 * it becomes a valid address if the bootloader does
3987 * provide it (see above).
3988 *
3989 * In case any of them is set, the controller has to
3990 * start up as unconfigured.
3991 */
3992 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
3993 invalid_bdaddr)
3994 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3995
3996 /* For an unconfigured controller it is required to
3997 * read at least the version information provided by
3998 * the Read Local Version Information command.
3999 *
4000 * If the set_bdaddr driver callback is provided, then
4001 * also the original Bluetooth public device address
4002 * will be read using the Read BD Address command.
4003 */
4004 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4005 ret = hci_unconf_init_sync(hdev);
4006 }
4007
4008 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4009 /* If public address change is configured, ensure that
4010 * the address gets programmed. If the driver does not
4011 * support changing the public address, fail the power
4012 * on procedure.
4013 */
4014 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4015 hdev->set_bdaddr)
4016 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4017 else
4018 ret = -EADDRNOTAVAIL;
4019 }
4020
4021 if (!ret) {
4022 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4023 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4024 ret = hci_init_sync(hdev);
4025 if (!ret && hdev->post_init)
4026 ret = hdev->post_init(hdev);
4027 }
4028 }
4029
4030 /* If the HCI Reset command is clearing all diagnostic settings,
4031 * then they need to be reprogrammed after the init procedure
4032	 * has completed.
4033 */
4034 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4035 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4036 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4037 ret = hdev->set_diag(hdev, true);
4038
4039 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4040 msft_do_open(hdev);
4041 aosp_do_open(hdev);
4042 }
4043
4044 clear_bit(HCI_INIT, &hdev->flags);
4045
4046 if (!ret) {
4047 hci_dev_hold(hdev);
4048 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4049 hci_adv_instances_set_rpa_expired(hdev, true);
4050 set_bit(HCI_UP, &hdev->flags);
4051 hci_sock_dev_event(hdev, HCI_DEV_UP);
4052 hci_leds_update_powered(hdev, true);
4053 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4054 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
4055 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4056 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4057 hci_dev_test_flag(hdev, HCI_MGMT) &&
4058 hdev->dev_type == HCI_PRIMARY) {
4059 ret = hci_powered_update_sync(hdev);
4060 }
4061 } else {
4062 /* Init failed, cleanup */
4063 flush_work(&hdev->tx_work);
4064
4065		/* Since hci_rx_work() may wake up new cmd_work
4066		 * it should be flushed first to avoid an unexpected call of
4067		 * hci_cmd_work()
4068 */
4069 flush_work(&hdev->rx_work);
4070 flush_work(&hdev->cmd_work);
4071
4072 skb_queue_purge(&hdev->cmd_q);
4073 skb_queue_purge(&hdev->rx_q);
4074
4075 if (hdev->flush)
4076 hdev->flush(hdev);
4077
4078 if (hdev->sent_cmd) {
4079 kfree_skb(hdev->sent_cmd);
4080 hdev->sent_cmd = NULL;
4081 }
4082
4083 clear_bit(HCI_RUNNING, &hdev->flags);
4084 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4085
4086 hdev->close(hdev);
4087 hdev->flags &= BIT(HCI_RAW);
4088 }
4089
4090done:
4091 return ret;
4092}
4093
4094/* This function requires the caller holds hdev->lock */
4095static void hci_pend_le_actions_clear(struct hci_dev *hdev)
4096{
4097 struct hci_conn_params *p;
4098
4099 list_for_each_entry(p, &hdev->le_conn_params, list) {
4100 if (p->conn) {
4101 hci_conn_drop(p->conn);
4102 hci_conn_put(p->conn);
4103 p->conn = NULL;
4104 }
4105 list_del_init(&p->action);
4106 }
4107
4108 BT_DBG("All LE pending actions cleared");
4109}
4110
4111int hci_dev_close_sync(struct hci_dev *hdev)
4112{
4113 bool auto_off;
4114 int err = 0;
4115
4116 bt_dev_dbg(hdev, "");
4117
4118 cancel_delayed_work(&hdev->power_off);
4119 cancel_delayed_work(&hdev->ncmd_timer);
4120
4121 hci_request_cancel_all(hdev);
4122
4123 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
4124 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4125 test_bit(HCI_UP, &hdev->flags)) {
4126 /* Execute vendor specific shutdown routine */
4127 if (hdev->shutdown)
4128 err = hdev->shutdown(hdev);
4129 }
4130
4131 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
4132 cancel_delayed_work_sync(&hdev->cmd_timer);
4133 return err;
4134 }
4135
4136 hci_leds_update_powered(hdev, false);
4137
4138 /* Flush RX and TX works */
4139 flush_work(&hdev->tx_work);
4140 flush_work(&hdev->rx_work);
4141
4142 if (hdev->discov_timeout > 0) {
4143 hdev->discov_timeout = 0;
4144 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4145 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
4146 }
4147
4148 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
4149 cancel_delayed_work(&hdev->service_cache);
4150
4151 if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4152 struct adv_info *adv_instance;
4153
4154 cancel_delayed_work_sync(&hdev->rpa_expired);
4155
4156 list_for_each_entry(adv_instance, &hdev->adv_instances, list)
4157 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
4158 }
4159
4160 /* Avoid potential lockdep warnings from the *_flush() calls by
4161 * ensuring the workqueue is empty up front.
4162 */
4163 drain_workqueue(hdev->workqueue);
4164
4165 hci_dev_lock(hdev);
4166
4167 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4168
4169 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
4170
4171 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
4172 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4173 hci_dev_test_flag(hdev, HCI_MGMT))
4174 __mgmt_power_off(hdev);
4175
4176 hci_inquiry_cache_flush(hdev);
4177 hci_pend_le_actions_clear(hdev);
4178 hci_conn_hash_flush(hdev);
4179	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
4180	smp_unregister(hdev);
4181	hci_dev_unlock(hdev);
4182
4183 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
4184
4185 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4186 aosp_do_close(hdev);
4187 msft_do_close(hdev);
4188 }
4189
4190 if (hdev->flush)
4191 hdev->flush(hdev);
4192
4193 /* Reset device */
4194 skb_queue_purge(&hdev->cmd_q);
4195 atomic_set(&hdev->cmd_cnt, 1);
4196 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
4197 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4198 set_bit(HCI_INIT, &hdev->flags);
4199 hci_reset_sync(hdev);
4200 clear_bit(HCI_INIT, &hdev->flags);
4201 }
4202
4203 /* flush cmd work */
4204 flush_work(&hdev->cmd_work);
4205
4206 /* Drop queues */
4207 skb_queue_purge(&hdev->rx_q);
4208 skb_queue_purge(&hdev->cmd_q);
4209 skb_queue_purge(&hdev->raw_q);
4210
4211 /* Drop last sent command */
4212 if (hdev->sent_cmd) {
4213 cancel_delayed_work_sync(&hdev->cmd_timer);
4214 kfree_skb(hdev->sent_cmd);
4215 hdev->sent_cmd = NULL;
4216 }
4217
4218 clear_bit(HCI_RUNNING, &hdev->flags);
4219 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
4220
4221 /* After this point our queues are empty and no tasks are scheduled. */
4222 hdev->close(hdev);
4223
4224 /* Clear flags */
4225 hdev->flags &= BIT(HCI_RAW);
4226 hci_dev_clear_volatile_flags(hdev);
4227
4228 /* Controller radio is available but is currently powered down */
4229 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
4230
4231 memset(hdev->eir, 0, sizeof(hdev->eir));
4232 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
4233 bacpy(&hdev->random_addr, BDADDR_ANY);
4234
4235 hci_dev_put(hdev);
4236 return err;
4237}
4238
4239/* This function performs the power on HCI command sequence as follows:
4240 *
4241 * If the controller is already up (HCI_UP), run the
4242 * hci_powered_update_sync sequence; otherwise run hci_dev_open_sync,
4243 * which is followed by hci_powered_update_sync once the init sequence completes.
4244 */
4245static int hci_power_on_sync(struct hci_dev *hdev)
4246{
4247 int err;
4248
4249 if (test_bit(HCI_UP, &hdev->flags) &&
4250 hci_dev_test_flag(hdev, HCI_MGMT) &&
4251 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
4252 cancel_delayed_work(&hdev->power_off);
4253 return hci_powered_update_sync(hdev);
4254 }
4255
4256 err = hci_dev_open_sync(hdev);
4257 if (err < 0)
4258 return err;
4259
4260 /* During the HCI setup phase, a few error conditions are
4261 * ignored and they need to be checked now. If they are still
4262 * valid, it is important to return the device back off.
4263 */
4264 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
4265 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
4266 (hdev->dev_type == HCI_PRIMARY &&
4267 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
4268 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
4269 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
4270 hci_dev_close_sync(hdev);
4271 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
4272 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
4273 HCI_AUTO_OFF_TIMEOUT);
4274 }
4275
4276 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4277 /* For unconfigured devices, set the HCI_RAW flag
4278 * so that userspace can easily identify them.
4279 */
4280 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4281 set_bit(HCI_RAW, &hdev->flags);
4282
4283 /* For fully configured devices, this will send
4284 * the Index Added event. For unconfigured devices,
4285	 * it will send an Unconfigured Index Added event.
4286 *
4287 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
4288	 * and no event will be sent.
4289 */
4290 mgmt_index_added(hdev);
4291 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
4292 /* When the controller is now configured, then it
4293 * is important to clear the HCI_RAW flag.
4294 */
4295 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4296 clear_bit(HCI_RAW, &hdev->flags);
4297
4298 /* Powering on the controller with HCI_CONFIG set only
4299 * happens with the transition from unconfigured to
4300 * configured. This will send the Index Added event.
4301 */
4302 mgmt_index_added(hdev);
4303 }
4304
4305 return 0;
4306}
4307
4308static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
4309{
4310 struct hci_cp_remote_name_req_cancel cp;
4311
4312 memset(&cp, 0, sizeof(cp));
4313 bacpy(&cp.bdaddr, addr);
4314
4315 return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
4316 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4317}
4318
4319int hci_stop_discovery_sync(struct hci_dev *hdev)
4320{
4321 struct discovery_state *d = &hdev->discovery;
4322 struct inquiry_entry *e;
4323 int err;
4324
4325 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
4326
4327 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
4328 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
4329 err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
4330 0, NULL, HCI_CMD_TIMEOUT);
4331 if (err)
4332 return err;
4333 }
4334
4335 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
4336 cancel_delayed_work(&hdev->le_scan_disable);
4337 cancel_delayed_work(&hdev->le_scan_restart);
4338
4339 err = hci_scan_disable_sync(hdev);
4340 if (err)
4341 return err;
4342 }
4343
4344 } else {
4345 err = hci_scan_disable_sync(hdev);
4346 if (err)
4347 return err;
4348 }
4349
4350 /* Resume advertising if it was paused */
4351 if (use_ll_privacy(hdev))
4352 hci_resume_advertising_sync(hdev);
4353
4354 /* No further actions needed for LE-only discovery */
4355 if (d->type == DISCOV_TYPE_LE)
4356 return 0;
4357
4358 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
4359 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
4360 NAME_PENDING);
4361 if (!e)
4362 return 0;
4363
4364 return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
4365 }
4366
4367 return 0;
4368}
4369
4370static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
4371 u8 reason)
4372{
4373 struct hci_cp_disconn_phy_link cp;
4374
4375 memset(&cp, 0, sizeof(cp));
4376 cp.phy_handle = HCI_PHY_HANDLE(handle);
4377 cp.reason = reason;
4378
4379 return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
4380 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4381}
4382
4383static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
4384 u8 reason)
4385{
4386 struct hci_cp_disconnect cp;
4387
4388 if (conn->type == AMP_LINK)
4389 return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
4390
4391 memset(&cp, 0, sizeof(cp));
4392 cp.handle = cpu_to_le16(conn->handle);
4393 cp.reason = reason;
4394
4395 /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not
4396 * suspending.
4397 */
4398 if (!hdev->suspended)
4399 return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
4400 sizeof(cp), &cp,
4401 HCI_EV_DISCONN_COMPLETE,
4402 HCI_CMD_TIMEOUT, NULL);
4403
4404 return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
4405 HCI_CMD_TIMEOUT);
4406}
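
/* Editor's note: the split above matters to callers. Outside of
 * suspend the command waits for HCI_EV_DISCONN_COMPLETE, so by the
 * time it returns the link is really gone; during suspend only the
 * command status is awaited. A hedged sketch of a caller relying on
 * the event-waiting path (the function below is hypothetical):
 */
static int example_teardown_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	int err;

	err = hci_disconnect_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err)
		return err;

	/* Not suspended: the disconnection event has been processed,
	 * so controller state can be reprogrammed safely here.
	 */
	return 0;
}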
4407
4408static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
4409 struct hci_conn *conn)
4410{
4411 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
4412 return 0;
4413
4414 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
4415 6, &conn->dst, HCI_CMD_TIMEOUT);
4416}
4417
4418static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
4419{
4420 if (conn->type == LE_LINK)
4421 return hci_le_connect_cancel_sync(hdev, conn);
4422
4423 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
4424 return 0;
4425
4426 return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
4427 6, &conn->dst, HCI_CMD_TIMEOUT);
4428}
4429
4430static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
4431 u8 reason)
4432{
4433 struct hci_cp_reject_sync_conn_req cp;
4434
4435 memset(&cp, 0, sizeof(cp));
4436 bacpy(&cp.bdaddr, &conn->dst);
4437 cp.reason = reason;
4438
4439 /* SCO rejection has its own limited set of
4440 * allowed error values (0x0D-0x0F).
4441 */
4442 if (reason < 0x0d || reason > 0x0f)
4443 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
4444
4445 return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
4446 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4447}
4448
4449static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
4450 u8 reason)
4451{
4452 struct hci_cp_reject_conn_req cp;
4453
4454 if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
4455 return hci_reject_sco_sync(hdev, conn, reason);
4456
4457 memset(&cp, 0, sizeof(cp));
4458 bacpy(&cp.bdaddr, &conn->dst);
4459 cp.reason = reason;
4460
4461 return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
4462 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4463}
4464
4465static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
4466 u8 reason)
4467{
4468 int err;
4469
4470 switch (conn->state) {
4471 case BT_CONNECTED:
4472 case BT_CONFIG:
4473 return hci_disconnect_sync(hdev, conn, reason);
4474 case BT_CONNECT:
4475 err = hci_connect_cancel_sync(hdev, conn);
4476 /* Cleanup hci_conn object if it cannot be cancelled as it
4477		 * likely means the controller and host stack are out of sync.
4478 */
4479 if (err)
4480 hci_conn_failed(conn, err);
4481
4482 return err;
4483 case BT_CONNECT2:
4484 return hci_reject_conn_sync(hdev, conn, reason);
4485 default:
4486 conn->state = BT_CLOSED;
4487 break;
4488 }
4489
4490 return 0;
4491}
4492
4493static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
4494{
4495 struct hci_conn *conn, *tmp;
4496 int err;
4497
4498 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
4499 err = hci_abort_conn_sync(hdev, conn, reason);
4500 if (err)
4501 return err;
4502 }
4503
4504	return 0;
4505}
4506
4507/* This function performs the power off HCI command sequence as follows:
4508 *
4509 * Clear Advertising
4510 * Stop Discovery
4511 * Disconnect all connections
4512 * hci_dev_close_sync
4513 */
4514static int hci_power_off_sync(struct hci_dev *hdev)
4515{
4516 int err;
4517
4518 /* If controller is already down there is nothing to do */
4519 if (!test_bit(HCI_UP, &hdev->flags))
4520 return 0;
4521
4522 if (test_bit(HCI_ISCAN, &hdev->flags) ||
4523 test_bit(HCI_PSCAN, &hdev->flags)) {
4524 err = hci_write_scan_enable_sync(hdev, 0x00);
4525 if (err)
4526 return err;
4527 }
4528
4529 err = hci_clear_adv_sync(hdev, NULL, false);
4530 if (err)
4531 return err;
4532
4533 err = hci_stop_discovery_sync(hdev);
4534 if (err)
4535 return err;
4536
4537 /* Terminated due to Power Off */
4538 err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
4539 if (err)
4540 return err;
4541
4542 return hci_dev_close_sync(hdev);
4543}
4544
4545int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
4546{
4547 if (val)
4548 return hci_power_on_sync(hdev);
4549
4550 return hci_power_off_sync(hdev);
4551}
4552
4553static int hci_write_iac_sync(struct hci_dev *hdev)
4554{
4555 struct hci_cp_write_current_iac_lap cp;
4556
4557 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
4558 return 0;
4559
4560 memset(&cp, 0, sizeof(cp));
4561
4562 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
4563 /* Limited discoverable mode */
4564 cp.num_iac = min_t(u8, hdev->num_iac, 2);
4565 cp.iac_lap[0] = 0x00; /* LIAC */
4566 cp.iac_lap[1] = 0x8b;
4567 cp.iac_lap[2] = 0x9e;
4568 cp.iac_lap[3] = 0x33; /* GIAC */
4569 cp.iac_lap[4] = 0x8b;
4570 cp.iac_lap[5] = 0x9e;
4571 } else {
4572 /* General discoverable mode */
4573 cp.num_iac = 1;
4574 cp.iac_lap[0] = 0x33; /* GIAC */
4575 cp.iac_lap[1] = 0x8b;
4576 cp.iac_lap[2] = 0x9e;
4577 }
4578
4579 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
4580 (cp.num_iac * 3) + 1, &cp,
4581 HCI_CMD_TIMEOUT);
4582}
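
/* Editor's note: the iac_lap bytes above are the LIAC (0x9E8B00) and
 * GIAC (0x9E8B33) Inquiry Access Codes serialized little-endian,
 * three octets per LAP. A small sketch of that encoding, with a
 * hypothetical helper name:
 */
static void put_iac_lap_sketch(u8 *dst, u32 lap)
{
	dst[0] = lap & 0xff;		/* GIAC 0x9e8b33 -> 0x33 */
	dst[1] = (lap >> 8) & 0xff;	/* -> 0x8b */
	dst[2] = (lap >> 16) & 0xff;	/* -> 0x9e */
}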
4583
4584int hci_update_discoverable_sync(struct hci_dev *hdev)
4585{
4586 int err = 0;
4587
4588 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4589 err = hci_write_iac_sync(hdev);
4590 if (err)
4591 return err;
4592
4593 err = hci_update_scan_sync(hdev);
4594 if (err)
4595 return err;
4596
4597 err = hci_update_class_sync(hdev);
4598 if (err)
4599 return err;
4600 }
4601
4602 /* Advertising instances don't use the global discoverable setting, so
4603 * only update AD if advertising was enabled using Set Advertising.
4604 */
4605 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
4606 err = hci_update_adv_data_sync(hdev, 0x00);
4607 if (err)
4608 return err;
4609
4610 /* Discoverable mode affects the local advertising
4611 * address in limited privacy mode.
4612 */
4613 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
4614 if (ext_adv_capable(hdev))
4615 err = hci_start_ext_adv_sync(hdev, 0x00);
4616 else
4617 err = hci_enable_advertising_sync(hdev);
4618 }
4619 }
4620
4621 return err;
4622}
4623
4624static int update_discoverable_sync(struct hci_dev *hdev, void *data)
4625{
4626 return hci_update_discoverable_sync(hdev);
4627}
4628
4629int hci_update_discoverable(struct hci_dev *hdev)
4630{
4631 /* Only queue if it would have any effect */
4632 if (hdev_is_powered(hdev) &&
4633 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4634 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
4635 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
4636 return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
4637 NULL);
4638
4639 return 0;
4640}
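
/* Editor's note: hci_update_discoverable() shows the queuing pattern
 * used throughout this file: a static trampoline matching the
 * hci_cmd_sync_queue() callback signature wraps the synchronous worker
 * so it runs from the cmd_sync work queue. A hedged sketch of adding
 * another queued operation in the same style (names below are
 * hypothetical):
 */
static int example_update_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_scan_sync(hdev);
}

static int example_update(struct hci_dev *hdev)
{
	return hci_cmd_sync_queue(hdev, example_update_sync, NULL, NULL);
}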
4641
4642int hci_update_connectable_sync(struct hci_dev *hdev)
4643{
4644 int err;
4645
4646 err = hci_update_scan_sync(hdev);
4647 if (err)
4648 return err;
4649
4650 /* If BR/EDR is not enabled and we disable advertising as a
4651 * by-product of disabling connectable, we need to update the
4652 * advertising flags.
4653 */
4654 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
4655 err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
4656
4657 /* Update the advertising parameters if necessary */
4658 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4659 !list_empty(&hdev->adv_instances)) {
4660 if (ext_adv_capable(hdev))
4661 err = hci_start_ext_adv_sync(hdev,
4662 hdev->cur_adv_instance);
4663 else
4664 err = hci_enable_advertising_sync(hdev);
4665
4666 if (err)
4667 return err;
4668 }
4669
4670 return hci_update_passive_scan_sync(hdev);
4671}
4672
4673static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
4674{
4675 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
4676 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
4677 struct hci_cp_inquiry cp;
4678
4679 bt_dev_dbg(hdev, "");
4680
4681 if (hci_dev_test_flag(hdev, HCI_INQUIRY))
4682 return 0;
4683
4684 hci_dev_lock(hdev);
4685 hci_inquiry_cache_flush(hdev);
4686 hci_dev_unlock(hdev);
4687
4688 memset(&cp, 0, sizeof(cp));
4689
4690 if (hdev->discovery.limited)
4691 memcpy(&cp.lap, liac, sizeof(cp.lap));
4692 else
4693 memcpy(&cp.lap, giac, sizeof(cp.lap));
4694
4695 cp.length = length;
4696
4697 return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
4698 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4699}
4700
4701static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
4702{
4703 u8 own_addr_type;
4704 /* Accept list is not used for discovery */
4705 u8 filter_policy = 0x00;
4706 /* Default is to enable duplicates filter */
4707 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
4708 int err;
4709
4710 bt_dev_dbg(hdev, "");
4711
4712 /* If controller is scanning, it means the passive scanning is
4713 * running. Thus, we should temporarily stop it in order to set the
4714 * discovery scanning parameters.
4715 */
4716 err = hci_scan_disable_sync(hdev);
4717 if (err) {
4718 bt_dev_err(hdev, "Unable to disable scanning: %d", err);
4719 return err;
4720 }
4721
4722 cancel_interleave_scan(hdev);
4723
4724 /* Pause advertising since active scanning disables address resolution
4725	 * which advertising depends on in order to generate its RPAs.
4726 */
4727 if (use_ll_privacy(hdev)) {
4728 err = hci_pause_advertising_sync(hdev);
4729 if (err) {
4730 bt_dev_err(hdev, "pause advertising failed: %d", err);
4731 goto failed;
4732 }
4733 }
4734
4735 /* Disable address resolution while doing active scanning since the
4736 * accept list shall not be used and all reports shall reach the host
4737 * anyway.
4738 */
4739 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
4740 if (err) {
4741 bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
4742 err);
4743 goto failed;
4744 }
4745
4746 /* All active scans will be done with either a resolvable private
4747 * address (when privacy feature has been enabled) or non-resolvable
4748 * private address.
4749 */
4750 err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
4751 &own_addr_type);
4752 if (err < 0)
4753 own_addr_type = ADDR_LE_DEV_PUBLIC;
4754
4755 if (hci_is_adv_monitoring(hdev)) {
4756 /* Duplicate filter should be disabled when some advertisement
4757 * monitor is activated, otherwise AdvMon can only receive one
4758 * advertisement for one peer(*) during active scanning, and
4759 * might report loss to these peers.
4760 *
4761 * Note that different controllers have different meanings of
4762 * |duplicate|. Some of them consider packets with the same
4763 * address as duplicate, and others consider packets with the
4764		 * same address and the same RSSI as duplicate. Although in the
4765		 * latter case we don't need to disable the duplicate filter,
4766		 * active scanning is typically run only for a short period of
4767		 * time, so the power impact should be negligible.
4768 */
4769 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
4770 }
4771
4772 err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
4773 hdev->le_scan_window_discovery,
4774 own_addr_type, filter_policy, filter_dup);
4775 if (!err)
4776 return err;
4777
4778failed:
4779 /* Resume advertising if it was paused */
4780 if (use_ll_privacy(hdev))
4781 hci_resume_advertising_sync(hdev);
4782
4783 /* Resume passive scanning */
4784 hci_update_passive_scan_sync(hdev);
4785 return err;
4786}
4787
4788static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
4789{
4790 int err;
4791
4792 bt_dev_dbg(hdev, "");
4793
4794 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
4795 if (err)
4796 return err;
4797
4798 return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
4799}
4800
4801int hci_start_discovery_sync(struct hci_dev *hdev)
4802{
4803 unsigned long timeout;
4804 int err;
4805
4806 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
4807
4808 switch (hdev->discovery.type) {
4809 case DISCOV_TYPE_BREDR:
4810 return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
4811 case DISCOV_TYPE_INTERLEAVED:
4812 /* When running simultaneous discovery, the LE scanning time
4813		 * should occupy the whole discovery time since BR/EDR inquiry
4814 * and LE scanning are scheduled by the controller.
4815 *
4816 * For interleaving discovery in comparison, BR/EDR inquiry
4817 * and LE scanning are done sequentially with separate
4818 * timeouts.
4819 */
4820 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
4821 &hdev->quirks)) {
4822 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4823 /* During simultaneous discovery, we double LE scan
4824 * interval. We must leave some time for the controller
4825 * to do BR/EDR inquiry.
4826 */
4827 err = hci_start_interleaved_discovery_sync(hdev);
4828 break;
4829 }
4830
4831 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
4832 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
4833 break;
4834 case DISCOV_TYPE_LE:
4835 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4836 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
4837 break;
4838 default:
4839 return -EINVAL;
4840 }
4841
4842 if (err)
4843 return err;
4844
4845 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
4846
4847 /* When service discovery is used and the controller has a
4848 * strict duplicate filter, it is important to remember the
4849 * start and duration of the scan. This is required for
4850 * restarting scanning during the discovery phase.
4851 */
4852 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
4853 hdev->discovery.result_filtering) {
4854 hdev->discovery.scan_start = jiffies;
4855 hdev->discovery.scan_duration = timeout;
4856 }
4857
4858 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
4859 timeout);
4860 return 0;
4861}
4862
4863static void hci_suspend_monitor_sync(struct hci_dev *hdev)
4864{
4865 switch (hci_get_adv_monitor_offload_ext(hdev)) {
4866 case HCI_ADV_MONITOR_EXT_MSFT:
4867 msft_suspend_sync(hdev);
4868 break;
4869 default:
4870 return;
4871 }
4872}
4873
4874/* This function disables discovery and marks it as paused */
4875static int hci_pause_discovery_sync(struct hci_dev *hdev)
4876{
4877 int old_state = hdev->discovery.state;
4878 int err;
4879
4880	/* If discovery is already stopped/stopping/paused there is nothing to do */
4881 if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
4882 hdev->discovery_paused)
4883 return 0;
4884
4885 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4886 err = hci_stop_discovery_sync(hdev);
4887 if (err)
4888 return err;
4889
4890 hdev->discovery_paused = true;
4891 hdev->discovery_old_state = old_state;
4892 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4893
4894 return 0;
4895}
4896
4897static int hci_update_event_filter_sync(struct hci_dev *hdev)
4898{
4899 struct bdaddr_list_with_flags *b;
4900 u8 scan = SCAN_DISABLED;
4901 bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
4902 int err;
4903
4904 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
4905 return 0;
4906
4907 /* Some fake CSR controllers lock up after setting this type of
4908 * filter, so avoid sending the request altogether.
4909 */
4910 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
4911 return 0;
4912
4913 /* Always clear event filter when starting */
4914 hci_clear_event_filter_sync(hdev);
4915
4916 list_for_each_entry(b, &hdev->accept_list, list) {
4917		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
4918 continue;
4919
4920 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
4921
4922 err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
4923 HCI_CONN_SETUP_ALLOW_BDADDR,
4924 &b->bdaddr,
4925 HCI_CONN_SETUP_AUTO_ON);
4926 if (err)
4927 bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
4928 &b->bdaddr);
4929 else
4930 scan = SCAN_PAGE;
4931 }
4932
4933 if (scan && !scanning)
4934 hci_write_scan_enable_sync(hdev, scan);
4935 else if (!scan && scanning)
4936 hci_write_scan_enable_sync(hdev, scan);
4937
4938 return 0;
4939}
4940
4941/* This function disables scanning (BR and LE) and marks it as paused */
4942static int hci_pause_scan_sync(struct hci_dev *hdev)
4943{
4944 if (hdev->scanning_paused)
4945 return 0;
4946
4947 /* Disable page scan if enabled */
4948 if (test_bit(HCI_PSCAN, &hdev->flags))
4949 hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
4950
4951 hci_scan_disable_sync(hdev);
4952
4953 hdev->scanning_paused = true;
4954
4955 return 0;
4956}
4957
4958/* This function performs the HCI suspend procedures in the following order:
4959 *
4960 * Pause discovery (active scanning/inquiry)
4961 * Pause Directed Advertising/Advertising
4962 * Pause Scanning (passive scanning in case discovery was not active)
4963 * Disconnect all connections
4964 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
4965 * otherwise:
4966 * Update event mask (only set events that are allowed to wake up the host)
4967 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
4968 * Update passive scanning (lower duty cycle)
4969 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
4970 */
4971int hci_suspend_sync(struct hci_dev *hdev)
4972{
4973 int err;
4974
4975	/* If marked as suspended there is nothing to do */
4976 if (hdev->suspended)
4977 return 0;
4978
4979 /* Mark device as suspended */
4980 hdev->suspended = true;
4981
4982 /* Pause discovery if not already stopped */
4983 hci_pause_discovery_sync(hdev);
4984
4985 /* Pause other advertisements */
4986 hci_pause_advertising_sync(hdev);
4987
4988 /* Suspend monitor filters */
4989 hci_suspend_monitor_sync(hdev);
4990
4991 /* Prevent disconnects from causing scanning to be re-enabled */
4992	hci_pause_scan_sync(hdev);
4993
4994 /* Soft disconnect everything (power off) */
4995 err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
4996 if (err) {
4997 /* Set state to BT_RUNNING so resume doesn't notify */
4998 hdev->suspend_state = BT_RUNNING;
4999 hci_resume_sync(hdev);
5000 return err;
5001 }
5002
5003 /* Only configure accept list if disconnect succeeded and wake
5004 * isn't being prevented.
5005 */
5006 if (!hdev->wakeup || !hdev->wakeup(hdev)) {
5007 hdev->suspend_state = BT_SUSPEND_DISCONNECT;
5008 return 0;
5009 }
5010
5011 /* Unpause to take care of updating scanning params */
5012 hdev->scanning_paused = false;
5013
5014 /* Update event mask so only the allowed event can wakeup the host */
5015 hci_set_event_mask_sync(hdev);
5016
5017 /* Enable event filter for paired devices */
5018 hci_update_event_filter_sync(hdev);
5019
5020 /* Update LE passive scan if enabled */
5021 hci_update_passive_scan_sync(hdev);
5022
5023 /* Pause scan changes again. */
5024 hdev->scanning_paused = true;
5025
5026 hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
5027
5028 return 0;
5029}
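
/* Editor's note: a simplified sketch of how a PM notifier might drive
 * hci_suspend_sync()/hci_resume_sync(); the real glue lives in
 * hci_core.c and dispatches through the cmd_sync machinery rather
 * than calling these helpers directly.
 */
static int example_suspend_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct hci_dev *hdev = container_of(nb, struct hci_dev,
					    suspend_notifier);

	switch (action) {
	case PM_SUSPEND_PREPARE:
		hci_suspend_sync(hdev);
		break;
	case PM_POST_SUSPEND:
		hci_resume_sync(hdev);
		break;
	}

	return NOTIFY_DONE;
}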
5030
5031/* This function resumes discovery */
5032static int hci_resume_discovery_sync(struct hci_dev *hdev)
5033{
5034 int err;
5035
5036	/* If discovery is not paused there is nothing to do */
5037 if (!hdev->discovery_paused)
5038 return 0;
5039
5040 hdev->discovery_paused = false;
5041
5042 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5043
5044 err = hci_start_discovery_sync(hdev);
5045
5046 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5047 DISCOVERY_FINDING);
5048
5049 return err;
5050}
5051
5052static void hci_resume_monitor_sync(struct hci_dev *hdev)
5053{
5054 switch (hci_get_adv_monitor_offload_ext(hdev)) {
5055 case HCI_ADV_MONITOR_EXT_MSFT:
5056 msft_resume_sync(hdev);
5057 break;
5058 default:
5059 return;
5060 }
5061}
5062
5063/* This function resumes scanning and resets the paused flag */
5064static int hci_resume_scan_sync(struct hci_dev *hdev)
5065{
5066 if (!hdev->scanning_paused)
5067 return 0;
5068
5069 hdev->scanning_paused = false;
5070
5071 hci_update_scan_sync(hdev);
5072
5073 /* Reset passive scanning to normal */
5074 hci_update_passive_scan_sync(hdev);
5075
5076 return 0;
5077}
5078
5079/* This function performs the HCI resume procedures in the following order:
5080 *
5081 * Restore event mask
5082 * Clear event filter
5083 * Update passive scanning (normal duty cycle)
5084 * Resume Directed Advertising/Advertising
5085 * Resume discovery (active scanning/inquiry)
5086 */
5087int hci_resume_sync(struct hci_dev *hdev)
5088{
5089	/* If not marked as suspended there is nothing to do */
5090 if (!hdev->suspended)
5091 return 0;
5092
5093 hdev->suspended = false;
5094
5095 /* Restore event mask */
5096 hci_set_event_mask_sync(hdev);
5097
5098 /* Clear any event filters and restore scan state */
5099 hci_clear_event_filter_sync(hdev);
5100
5101 /* Resume scanning */
5102 hci_resume_scan_sync(hdev);
5103
5104 /* Resume monitor filters */
5105 hci_resume_monitor_sync(hdev);
5106
5107 /* Resume other advertisements */
5108 hci_resume_advertising_sync(hdev);
5109
5110 /* Resume discovery */
5111 hci_resume_discovery_sync(hdev);
5112
5113 return 0;
5114}
5115
5116static bool conn_use_rpa(struct hci_conn *conn)
5117{
5118 struct hci_dev *hdev = conn->hdev;
5119
5120 return hci_dev_test_flag(hdev, HCI_PRIVACY);
5121}
5122
5123static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
5124 struct hci_conn *conn)
5125{
5126 struct hci_cp_le_set_ext_adv_params cp;
5127 int err;
5128 bdaddr_t random_addr;
5129 u8 own_addr_type;
5130
5131 err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
5132 &own_addr_type);
5133 if (err)
5134 return err;
5135
5136 /* Set require_privacy to false so that the remote device has a
5137 * chance of identifying us.
5138 */
5139 err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
5140 &own_addr_type, &random_addr);
5141 if (err)
5142 return err;
5143
5144 memset(&cp, 0, sizeof(cp));
5145
5146 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
5147 cp.own_addr_type = own_addr_type;
5148 cp.channel_map = hdev->le_adv_channel_map;
5149 cp.tx_power = HCI_TX_POWER_INVALID;
5150 cp.primary_phy = HCI_ADV_PHY_1M;
5151 cp.secondary_phy = HCI_ADV_PHY_1M;
5152 cp.handle = 0x00; /* Use instance 0 for directed adv */
5153 cp.own_addr_type = own_addr_type;
5154 cp.peer_addr_type = conn->dst_type;
5155 bacpy(&cp.peer_addr, &conn->dst);
5156
5157	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53,
5158	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
5159	 * does not support advertising data; if the advertising set already
5160	 * contains some, the controller shall return error code 'Invalid
5161	 * HCI Command Parameters' (0x12).
5162	 * So the adv set for handle 0x00 must be removed first, since we use
5163	 * instance 0 for directed adv.
5164 */
5165 err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
5166 if (err)
5167 return err;
5168
5169 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
5170 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5171 if (err)
5172 return err;
5173
5174	/* Check if the random address needs to be updated */
5175 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
5176 bacmp(&random_addr, BDADDR_ANY) &&
5177 bacmp(&random_addr, &hdev->random_addr)) {
5178 err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
5179 &random_addr);
5180 if (err)
5181 return err;
5182 }
5183
5184 return hci_enable_ext_advertising_sync(hdev, 0x00);
5185}
5186
5187static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
5188 struct hci_conn *conn)
5189{
5190 struct hci_cp_le_set_adv_param cp;
5191 u8 status;
5192 u8 own_addr_type;
5193 u8 enable;
5194
5195 if (ext_adv_capable(hdev))
5196 return hci_le_ext_directed_advertising_sync(hdev, conn);
5197
5198 /* Clear the HCI_LE_ADV bit temporarily so that the
5199 * hci_update_random_address knows that it's safe to go ahead
5200 * and write a new random address. The flag will be set back on
5201 * as soon as the SET_ADV_ENABLE HCI command completes.
5202 */
5203 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5204
5205 /* Set require_privacy to false so that the remote device has a
5206 * chance of identifying us.
5207 */
5208 status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
5209 &own_addr_type);
5210 if (status)
5211 return status;
5212
5213 memset(&cp, 0, sizeof(cp));
5214
5215	/* Some controllers might reject the command if intervals are not
5216 * within range for undirected advertising.
5217 * BCM20702A0 is known to be affected by this.
5218 */
5219 cp.min_interval = cpu_to_le16(0x0020);
5220 cp.max_interval = cpu_to_le16(0x0020);
5221
5222 cp.type = LE_ADV_DIRECT_IND;
5223 cp.own_address_type = own_addr_type;
5224 cp.direct_addr_type = conn->dst_type;
5225 bacpy(&cp.direct_addr, &conn->dst);
5226 cp.channel_map = hdev->le_adv_channel_map;
5227
5228 status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
5229 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5230 if (status)
5231 return status;
5232
5233 enable = 0x01;
5234
5235 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
5236 sizeof(enable), &enable, HCI_CMD_TIMEOUT);
5237}
5238
5239static void set_ext_conn_params(struct hci_conn *conn,
5240 struct hci_cp_le_ext_conn_param *p)
5241{
5242 struct hci_dev *hdev = conn->hdev;
5243
5244 memset(p, 0, sizeof(*p));
5245
5246 p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
5247 p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
5248 p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
5249 p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
5250 p->conn_latency = cpu_to_le16(conn->le_conn_latency);
5251 p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
5252 p->min_ce_len = cpu_to_le16(0x0000);
5253 p->max_ce_len = cpu_to_le16(0x0000);
5254}
5255
5256static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
5257 struct hci_conn *conn, u8 own_addr_type)
5258{
5259 struct hci_cp_le_ext_create_conn *cp;
5260 struct hci_cp_le_ext_conn_param *p;
5261 u8 data[sizeof(*cp) + sizeof(*p) * 3];
5262 u32 plen;
5263
5264 cp = (void *)data;
5265 p = (void *)cp->data;
5266
5267 memset(cp, 0, sizeof(*cp));
5268
5269 bacpy(&cp->peer_addr, &conn->dst);
5270 cp->peer_addr_type = conn->dst_type;
5271 cp->own_addr_type = own_addr_type;
5272
5273 plen = sizeof(*cp);
5274
5275 if (scan_1m(hdev)) {
5276 cp->phys |= LE_SCAN_PHY_1M;
5277 set_ext_conn_params(conn, p);
5278
5279 p++;
5280 plen += sizeof(*p);
5281 }
5282
5283 if (scan_2m(hdev)) {
5284 cp->phys |= LE_SCAN_PHY_2M;
5285 set_ext_conn_params(conn, p);
5286
5287 p++;
5288 plen += sizeof(*p);
5289 }
5290
5291 if (scan_coded(hdev)) {
5292 cp->phys |= LE_SCAN_PHY_CODED;
5293 set_ext_conn_params(conn, p);
5294
5295 plen += sizeof(*p);
5296 }
5297
5298 return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
5299 plen, data,
5300 HCI_EV_LE_ENHANCED_CONN_COMPLETE,
5301					conn->conn_timeout, NULL);
5302}
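
/* Editor's note: data[] above is the variable-length
 * HCI_OP_LE_EXT_CREATE_CONN payload, one fixed header plus one
 * hci_cp_le_ext_conn_param block per PHY bit set in cp->phys, in
 * 1M, 2M, Coded order. A worked sizing example: with all three PHYs
 * scannable, plen = sizeof(*cp) + 3 * sizeof(*p), i.e. the whole
 * buffer; with only the 1M PHY, plen = sizeof(*cp) + sizeof(*p).
 */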
5303
5304int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
5305{
5306 struct hci_cp_le_create_conn cp;
5307 struct hci_conn_params *params;
5308 u8 own_addr_type;
5309 int err;
5310
5311 /* If requested to connect as peripheral use directed advertising */
5312 if (conn->role == HCI_ROLE_SLAVE) {
5313 /* If we're active scanning and simultaneous roles is not
5314 * enabled simply reject the attempt.
5315 */
5316 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5317		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
5318		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
5319 hci_conn_del(conn);
5320 return -EBUSY;
5321 }
5322
5323 /* Pause advertising while doing directed advertising. */
5324 hci_pause_advertising_sync(hdev);
5325
5326 err = hci_le_directed_advertising_sync(hdev, conn);
5327 goto done;
5328 }
5329
5330 /* Disable advertising if simultaneous roles is not in use. */
5331 if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
5332 hci_pause_advertising_sync(hdev);
5333
5334 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
5335 if (params) {
5336 conn->le_conn_min_interval = params->conn_min_interval;
5337 conn->le_conn_max_interval = params->conn_max_interval;
5338 conn->le_conn_latency = params->conn_latency;
5339 conn->le_supv_timeout = params->supervision_timeout;
5340 } else {
5341 conn->le_conn_min_interval = hdev->le_conn_min_interval;
5342 conn->le_conn_max_interval = hdev->le_conn_max_interval;
5343 conn->le_conn_latency = hdev->le_conn_latency;
5344 conn->le_supv_timeout = hdev->le_supv_timeout;
5345 }
5346
5347 /* If controller is scanning, we stop it since some controllers are
5348 * not able to scan and connect at the same time. Also set the
5349 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
5350 * handler for scan disabling knows to set the correct discovery
5351 * state.
5352 */
5353 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5354 hci_scan_disable_sync(hdev);
5355 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
5356 }
5357
5358 /* Update random address, but set require_privacy to false so
5359	 * that we never connect with a non-resolvable address.
5360 */
5361 err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
5362 &own_addr_type);
5363 if (err)
5364 goto done;
5365
5366 if (use_ext_conn(hdev)) {
5367 err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
5368 goto done;
5369 }
5370
5371 memset(&cp, 0, sizeof(cp));
5372
5373 cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
5374 cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
5375
5376 bacpy(&cp.peer_addr, &conn->dst);
5377 cp.peer_addr_type = conn->dst_type;
5378 cp.own_address_type = own_addr_type;
5379 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
5380 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
5381 cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
5382 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
5383 cp.min_ce_len = cpu_to_le16(0x0000);
5384 cp.max_ce_len = cpu_to_le16(0x0000);
5385
5386 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
5387 *
5388 * If this event is unmasked and the HCI_LE_Connection_Complete event
5389 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
5390 * sent when a new connection has been created.
5391 */
5392	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
5393 sizeof(cp), &cp,
5394 use_enhanced_conn_complete(hdev) ?
5395 HCI_EV_LE_ENHANCED_CONN_COMPLETE :
5396 HCI_EV_LE_CONN_COMPLETE,
5397 conn->conn_timeout, NULL);
5398
5399done:
5400	/* Re-enable advertising after the connection attempt is finished. */
5401 hci_resume_advertising_sync(hdev);
5402 return err;
5403}