/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24 #include <linux/sched/signal.h>
26 #include <net/bluetooth/bluetooth.h>
27 #include <net/bluetooth/hci_core.h>
28 #include <net/bluetooth/mgmt.h>
31 #include "hci_request.h"
35 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
37 skb_queue_head_init(&req->cmd_q);
42 void hci_req_purge(struct hci_request *req)
44 skb_queue_purge(&req->cmd_q);
47 bool hci_req_status_pend(struct hci_dev *hdev)
49 return hdev->req_status == HCI_REQ_PEND;
52 static int req_run(struct hci_request *req, hci_req_complete_t complete,
53 hci_req_complete_skb_t complete_skb)
55 struct hci_dev *hdev = req->hdev;
59 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
61 /* If an error occurred during request building, remove all HCI
62 * commands queued on the HCI request queue.
65 skb_queue_purge(&req->cmd_q);
69 /* Do not allow empty requests */
70 if (skb_queue_empty(&req->cmd_q))
73 skb = skb_peek_tail(&req->cmd_q);
75 bt_cb(skb)->hci.req_complete = complete;
76 } else if (complete_skb) {
77 bt_cb(skb)->hci.req_complete_skb = complete_skb;
78 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
81 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
82 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
83 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
90 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
92 return req_run(req, complete, NULL);
95 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
97 return req_run(req, NULL, complete);
100 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
103 bt_dev_dbg(hdev, "result 0x%2.2x", result);
105 if (hdev->req_status == HCI_REQ_PEND) {
106 hdev->req_result = result;
107 hdev->req_status = HCI_REQ_DONE;
109 hdev->req_skb = skb_get(skb);
110 wake_up_interruptible(&hdev->req_wait_q);
114 void hci_req_sync_cancel(struct hci_dev *hdev, int err)
116 bt_dev_dbg(hdev, "err 0x%2.2x", err);
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
125 /* Execute request and wait for completion. */
126 int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
128 unsigned long opt, u32 timeout, u8 *hci_status)
130 struct hci_request req;
133 bt_dev_dbg(hdev, "start");
135 hci_req_init(&req, hdev);
137 hdev->req_status = HCI_REQ_PEND;
139 err = func(&req, opt);
142 *hci_status = HCI_ERROR_UNSPECIFIED;
146 err = hci_req_run_skb(&req, hci_req_sync_complete);
148 hdev->req_status = 0;
150 /* ENODATA means the HCI request command queue is empty.
151 * This can happen when a request with conditionals doesn't
152 * trigger any commands to be sent. This is normal behavior
153 * and should not trigger an error return.
155 if (err == -ENODATA) {
162 *hci_status = HCI_ERROR_UNSPECIFIED;
167 err = wait_event_interruptible_timeout(hdev->req_wait_q,
168 hdev->req_status != HCI_REQ_PEND, timeout);
170 if (err == -ERESTARTSYS)
173 switch (hdev->req_status) {
175 err = -bt_to_errno(hdev->req_result);
177 *hci_status = hdev->req_result;
180 case HCI_REQ_CANCELED:
181 err = -hdev->req_result;
183 *hci_status = HCI_ERROR_UNSPECIFIED;
189 *hci_status = HCI_ERROR_UNSPECIFIED;
193 kfree_skb(hdev->req_skb);
194 hdev->req_skb = NULL;
195 hdev->req_status = hdev->req_result = 0;
197 bt_dev_dbg(hdev, "end: err %d", err);
202 int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
204 unsigned long opt, u32 timeout, u8 *hci_status)
208 /* Serialize all requests */
209 hci_req_sync_lock(hdev);
210 /* check the state after obtaing the lock to protect the HCI_UP
211 * against any races from hci_dev_do_close when the controller
214 if (test_bit(HCI_UP, &hdev->flags))
215 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
218 hci_req_sync_unlock(hdev);
223 struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
226 int len = HCI_COMMAND_HDR_SIZE + plen;
227 struct hci_command_hdr *hdr;
230 skb = bt_skb_alloc(len, GFP_ATOMIC);
234 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
235 hdr->opcode = cpu_to_le16(opcode);
239 skb_put_data(skb, param, plen);
241 bt_dev_dbg(hdev, "skb len %d", skb->len);
243 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
244 hci_skb_opcode(skb) = opcode;
249 /* Queue a command to an asynchronous HCI request */
250 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
251 const void *param, u8 event)
253 struct hci_dev *hdev = req->hdev;
256 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
258 /* If an error occurred during request building, there is no point in
259 * queueing the HCI command. We can simply return.
264 skb = hci_prepare_cmd(hdev, opcode, plen, param);
266 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
272 if (skb_queue_empty(&req->cmd_q))
273 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
275 bt_cb(skb)->hci.req_event = event;
277 skb_queue_tail(&req->cmd_q, skb);
280 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
283 hci_req_add_ev(req, opcode, plen, param, 0);
286 void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
288 struct hci_dev *hdev = req->hdev;
289 struct hci_cp_write_page_scan_activity acp;
292 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
295 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
299 type = PAGE_SCAN_TYPE_INTERLACED;
301 /* 160 msec page scan interval */
302 acp.interval = cpu_to_le16(0x0100);
304 type = hdev->def_page_scan_type;
305 acp.interval = cpu_to_le16(hdev->def_page_scan_int);
308 acp.window = cpu_to_le16(hdev->def_page_scan_window);
310 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
311 __cpu_to_le16(hdev->page_scan_window) != acp.window)
312 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
315 if (hdev->page_scan_type != type)
316 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
319 static void start_interleave_scan(struct hci_dev *hdev)
321 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
322 queue_delayed_work(hdev->req_workqueue,
323 &hdev->interleave_scan, 0);
326 static bool is_interleave_scanning(struct hci_dev *hdev)
328 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
331 static void cancel_interleave_scan(struct hci_dev *hdev)
333 bt_dev_dbg(hdev, "cancelling interleave scan");
335 cancel_delayed_work_sync(&hdev->interleave_scan);
337 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
340 /* Return true if interleave_scan wasn't started until exiting this function,
341 * otherwise, return false
343 static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
345 /* Do interleaved scan only if all of the following are true:
346 * - There is at least one ADV monitor
347 * - At least one pending LE connection or one device to be scanned for
348 * - Monitor offloading is not supported
349 * If so, we should alternate between allowlist scan and one without
350 * any filters to save power.
352 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
353 !(list_empty(&hdev->pend_le_conns) &&
354 list_empty(&hdev->pend_le_reports)) &&
355 hci_get_adv_monitor_offload_ext(hdev) ==
356 HCI_ADV_MONITOR_EXT_NONE;
357 bool is_interleaving = is_interleave_scanning(hdev);
359 if (use_interleaving && !is_interleaving) {
360 start_interleave_scan(hdev);
361 bt_dev_dbg(hdev, "starting interleave scan");
365 if (!use_interleaving && is_interleaving)
366 cancel_interleave_scan(hdev);
371 /* This function controls the background scanning based on hdev->pend_le_conns
372 * list. If there are pending LE connection we start the background scanning,
373 * otherwise we stop it.
375 * This function requires the caller holds hdev->lock.
377 static void __hci_update_background_scan(struct hci_request *req)
379 struct hci_dev *hdev = req->hdev;
381 if (!test_bit(HCI_UP, &hdev->flags) ||
382 test_bit(HCI_INIT, &hdev->flags) ||
383 hci_dev_test_flag(hdev, HCI_SETUP) ||
384 hci_dev_test_flag(hdev, HCI_CONFIG) ||
385 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
386 hci_dev_test_flag(hdev, HCI_UNREGISTER))
389 /* No point in doing scanning if LE support hasn't been enabled */
390 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
393 /* If discovery is active don't interfere with it */
394 if (hdev->discovery.state != DISCOVERY_STOPPED)
397 /* Reset RSSI and UUID filters when starting background scanning
398 * since these filters are meant for service discovery only.
400 * The Start Discovery and Start Service Discovery operations
401 * ensure to set proper values for RSSI threshold and UUID
402 * filter list. So it is safe to just reset them here.
404 hci_discovery_filter_clear(hdev);
406 bt_dev_dbg(hdev, "ADV monitoring is %s",
407 hci_is_adv_monitoring(hdev) ? "on" : "off");
409 if (list_empty(&hdev->pend_le_conns) &&
410 list_empty(&hdev->pend_le_reports) &&
411 !hci_is_adv_monitoring(hdev)) {
412 /* If there is no pending LE connections or devices
413 * to be scanned for or no ADV monitors, we should stop the
414 * background scanning.
417 /* If controller is not scanning we are done. */
418 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
421 hci_req_add_le_scan_disable(req, false);
423 bt_dev_dbg(hdev, "stopping background scanning");
425 /* If there is at least one pending LE connection, we should
426 * keep the background scan running.
429 /* If controller is connecting, we should not start scanning
430 * since some controllers are not able to scan and connect at
433 if (hci_lookup_le_connect(hdev))
436 /* If controller is currently scanning, we stop it to ensure we
437 * don't miss any advertising (due to duplicates filter).
439 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
440 hci_req_add_le_scan_disable(req, false);
442 hci_req_add_le_passive_scan(req);
443 bt_dev_dbg(hdev, "starting background scanning");
447 void __hci_req_update_name(struct hci_request *req)
449 struct hci_dev *hdev = req->hdev;
450 struct hci_cp_write_local_name cp;
452 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
454 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
457 void __hci_req_update_eir(struct hci_request *req)
459 struct hci_dev *hdev = req->hdev;
460 struct hci_cp_write_eir cp;
462 if (!hdev_is_powered(hdev))
465 if (!lmp_ext_inq_capable(hdev))
468 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
471 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
474 memset(&cp, 0, sizeof(cp));
476 eir_create(hdev, cp.data);
478 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
481 memcpy(hdev->eir, cp.data, sizeof(cp.data));
483 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
486 void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
488 struct hci_dev *hdev = req->hdev;
490 if (hdev->scanning_paused) {
491 bt_dev_dbg(hdev, "Scanning is paused for suspend");
496 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
498 if (use_ext_scan(hdev)) {
499 struct hci_cp_le_set_ext_scan_enable cp;
501 memset(&cp, 0, sizeof(cp));
502 cp.enable = LE_SCAN_DISABLE;
503 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
506 struct hci_cp_le_set_scan_enable cp;
508 memset(&cp, 0, sizeof(cp));
509 cp.enable = LE_SCAN_DISABLE;
510 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
513 /* Disable address resolution */
514 if (use_ll_privacy(hdev) &&
515 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
516 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
519 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
523 static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
526 struct hci_cp_le_del_from_accept_list cp;
528 cp.bdaddr_type = bdaddr_type;
529 bacpy(&cp.bdaddr, bdaddr);
531 bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
533 hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
535 if (use_ll_privacy(req->hdev) &&
536 hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
539 irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
541 struct hci_cp_le_del_from_resolv_list cp;
543 cp.bdaddr_type = bdaddr_type;
544 bacpy(&cp.bdaddr, bdaddr);
546 hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
552 /* Adds connection to accept list if needed. On error, returns -1. */
553 static int add_to_accept_list(struct hci_request *req,
554 struct hci_conn_params *params, u8 *num_entries,
557 struct hci_cp_le_add_to_accept_list cp;
558 struct hci_dev *hdev = req->hdev;
560 /* Already in accept list */
561 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr,
565 /* Select filter policy to accept all advertising */
566 if (*num_entries >= hdev->le_accept_list_size)
569 /* Accept list can not be used with RPAs */
571 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
572 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
576 /* During suspend, only wakeable devices can be in accept list */
577 if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
578 params->current_flags))
582 cp.bdaddr_type = params->addr_type;
583 bacpy(&cp.bdaddr, ¶ms->addr);
585 bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
587 hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
589 if (use_ll_privacy(hdev) &&
590 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
593 irk = hci_find_irk_by_addr(hdev, ¶ms->addr,
596 struct hci_cp_le_add_to_resolv_list cp;
598 cp.bdaddr_type = params->addr_type;
599 bacpy(&cp.bdaddr, ¶ms->addr);
600 memcpy(cp.peer_irk, irk->val, 16);
602 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
603 memcpy(cp.local_irk, hdev->irk, 16);
605 memset(cp.local_irk, 0, 16);
607 hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
615 static u8 update_accept_list(struct hci_request *req)
617 struct hci_dev *hdev = req->hdev;
618 struct hci_conn_params *params;
619 struct bdaddr_list *b;
621 bool pend_conn, pend_report;
622 /* We allow usage of accept list even with RPAs in suspend. In the worst
623 * case, we won't be able to wake from devices that use the privacy1.2
624 * features. Additionally, once we support privacy1.2 and IRK
625 * offloading, we can update this to also check for those conditions.
627 bool allow_rpa = hdev->suspended;
629 if (use_ll_privacy(hdev) &&
630 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
633 /* Go through the current accept list programmed into the
634 * controller one by one and check if that address is still
635 * in the list of pending connections or list of devices to
636 * report. If not present in either list, then queue the
637 * command to remove it from the controller.
639 list_for_each_entry(b, &hdev->le_accept_list, list) {
640 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
643 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
647 /* If the device is not likely to connect or report,
648 * remove it from the accept list.
650 if (!pend_conn && !pend_report) {
651 del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
655 /* Accept list can not be used with RPAs */
657 !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
658 hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
665 /* Since all no longer valid accept list entries have been
666 * removed, walk through the list of pending connections
667 * and ensure that any new device gets programmed into
670 * If the list of the devices is larger than the list of
671 * available accept list entries in the controller, then
672 * just abort and return filer policy value to not use the
675 list_for_each_entry(params, &hdev->pend_le_conns, action) {
676 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
680 /* After adding all new pending connections, walk through
681 * the list of pending reports and also add these to the
682 * accept list if there is still space. Abort if space runs out.
684 list_for_each_entry(params, &hdev->pend_le_reports, action) {
685 if (add_to_accept_list(req, params, &num_entries, allow_rpa))
689 /* Use the allowlist unless the following conditions are all true:
690 * - We are not currently suspending
691 * - There are 1 or more ADV monitors registered and it's not offloaded
692 * - Interleaved scanning is not currently using the allowlist
694 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
695 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
696 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
699 /* Select filter policy to use accept list */
703 static bool scan_use_rpa(struct hci_dev *hdev)
705 return hci_dev_test_flag(hdev, HCI_PRIVACY);
708 static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
709 u16 window, u8 own_addr_type, u8 filter_policy,
710 bool filter_dup, bool addr_resolv)
712 struct hci_dev *hdev = req->hdev;
714 if (hdev->scanning_paused) {
715 bt_dev_dbg(hdev, "Scanning is paused for suspend");
719 if (use_ll_privacy(hdev) &&
720 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
724 hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
727 /* Use ext scanning if set ext scan param and ext scan enable is
730 if (use_ext_scan(hdev)) {
731 struct hci_cp_le_set_ext_scan_params *ext_param_cp;
732 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
733 struct hci_cp_le_scan_phy_params *phy_params;
734 u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
737 ext_param_cp = (void *)data;
738 phy_params = (void *)ext_param_cp->data;
740 memset(ext_param_cp, 0, sizeof(*ext_param_cp));
741 ext_param_cp->own_addr_type = own_addr_type;
742 ext_param_cp->filter_policy = filter_policy;
744 plen = sizeof(*ext_param_cp);
746 if (scan_1m(hdev) || scan_2m(hdev)) {
747 ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
749 memset(phy_params, 0, sizeof(*phy_params));
750 phy_params->type = type;
751 phy_params->interval = cpu_to_le16(interval);
752 phy_params->window = cpu_to_le16(window);
754 plen += sizeof(*phy_params);
758 if (scan_coded(hdev)) {
759 ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
761 memset(phy_params, 0, sizeof(*phy_params));
762 phy_params->type = type;
763 phy_params->interval = cpu_to_le16(interval);
764 phy_params->window = cpu_to_le16(window);
766 plen += sizeof(*phy_params);
770 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
773 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
774 ext_enable_cp.enable = LE_SCAN_ENABLE;
775 ext_enable_cp.filter_dup = filter_dup;
777 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
778 sizeof(ext_enable_cp), &ext_enable_cp);
780 struct hci_cp_le_set_scan_param param_cp;
781 struct hci_cp_le_set_scan_enable enable_cp;
783 memset(¶m_cp, 0, sizeof(param_cp));
784 param_cp.type = type;
785 param_cp.interval = cpu_to_le16(interval);
786 param_cp.window = cpu_to_le16(window);
787 param_cp.own_address_type = own_addr_type;
788 param_cp.filter_policy = filter_policy;
789 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
792 memset(&enable_cp, 0, sizeof(enable_cp));
793 enable_cp.enable = LE_SCAN_ENABLE;
794 enable_cp.filter_dup = filter_dup;
795 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
800 /* Returns true if an le connection is in the scanning state */
801 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
803 struct hci_conn_hash *h = &hdev->conn_hash;
808 list_for_each_entry_rcu(c, &h->list, list) {
809 if (c->type == LE_LINK && c->state == BT_CONNECT &&
810 test_bit(HCI_CONN_SCANNING, &c->flags)) {
821 /* Ensure to call hci_req_add_le_scan_disable() first to disable the
822 * controller based address resolution to be able to reconfigure
825 void hci_req_add_le_passive_scan(struct hci_request *req)
827 struct hci_dev *hdev = req->hdev;
830 u16 window, interval;
831 /* Default is to enable duplicates filter */
832 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
833 /* Background scanning should run with address resolution */
834 bool addr_resolv = true;
836 if (hdev->scanning_paused) {
837 bt_dev_dbg(hdev, "Scanning is paused for suspend");
841 /* Set require_privacy to false since no SCAN_REQ are send
842 * during passive scanning. Not using an non-resolvable address
843 * here is important so that peer devices using direct
844 * advertising with our address will be correctly reported
847 if (hci_update_random_address(req, false, scan_use_rpa(hdev),
851 if (hdev->enable_advmon_interleave_scan &&
852 __hci_update_interleaved_scan(hdev))
855 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
856 /* Adding or removing entries from the accept list must
857 * happen before enabling scanning. The controller does
858 * not allow accept list modification while scanning.
860 filter_policy = update_accept_list(req);
862 /* When the controller is using random resolvable addresses and
863 * with that having LE privacy enabled, then controllers with
864 * Extended Scanner Filter Policies support can now enable support
865 * for handling directed advertising.
867 * So instead of using filter polices 0x00 (no accept list)
868 * and 0x01 (accept list enabled) use the new filter policies
869 * 0x02 (no accept list) and 0x03 (accept list enabled).
871 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
872 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
873 filter_policy |= 0x02;
875 if (hdev->suspended) {
876 window = hdev->le_scan_window_suspend;
877 interval = hdev->le_scan_int_suspend;
879 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
880 } else if (hci_is_le_conn_scanning(hdev)) {
881 window = hdev->le_scan_window_connect;
882 interval = hdev->le_scan_int_connect;
883 } else if (hci_is_adv_monitoring(hdev)) {
884 window = hdev->le_scan_window_adv_monitor;
885 interval = hdev->le_scan_int_adv_monitor;
887 /* Disable duplicates filter when scanning for advertisement
888 * monitor for the following reasons.
890 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
891 * controllers ignore RSSI_Sampling_Period when the duplicates
894 * For SW pattern filtering, when we're not doing interleaved
895 * scanning, it is necessary to disable duplicates filter,
896 * otherwise hosts can only receive one advertisement and it's
897 * impossible to know if a peer is still in range.
899 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
901 window = hdev->le_scan_window;
902 interval = hdev->le_scan_interval;
905 bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
907 hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
908 own_addr_type, filter_policy, filter_dup,
912 static void hci_req_clear_event_filter(struct hci_request *req)
914 struct hci_cp_set_event_filter f;
916 if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
919 if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
920 memset(&f, 0, sizeof(f));
921 f.flt_type = HCI_FLT_CLEAR_ALL;
922 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
926 static void hci_req_set_event_filter(struct hci_request *req)
928 struct bdaddr_list_with_flags *b;
929 struct hci_cp_set_event_filter f;
930 struct hci_dev *hdev = req->hdev;
931 u8 scan = SCAN_DISABLED;
932 bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
934 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
937 /* Always clear event filter when starting */
938 hci_req_clear_event_filter(req);
940 list_for_each_entry(b, &hdev->accept_list, list) {
941 if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
945 memset(&f, 0, sizeof(f));
946 bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
947 f.flt_type = HCI_FLT_CONN_SETUP;
948 f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
949 f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
951 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
952 hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
956 if (scan && !scanning) {
957 set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
958 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
959 } else if (!scan && scanning) {
960 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
961 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
965 static void cancel_adv_timeout(struct hci_dev *hdev)
967 if (hdev->adv_instance_timeout) {
968 hdev->adv_instance_timeout = 0;
969 cancel_delayed_work(&hdev->adv_instance_expire);
973 /* This function requires the caller holds hdev->lock */
974 void __hci_req_pause_adv_instances(struct hci_request *req)
976 bt_dev_dbg(req->hdev, "Pausing advertising instances");
978 /* Call to disable any advertisements active on the controller.
979 * This will succeed even if no advertisements are configured.
981 __hci_req_disable_advertising(req);
983 /* If we are using software rotation, pause the loop */
984 if (!ext_adv_capable(req->hdev))
985 cancel_adv_timeout(req->hdev);
988 /* This function requires the caller holds hdev->lock */
989 static void __hci_req_resume_adv_instances(struct hci_request *req)
991 struct adv_info *adv;
993 bt_dev_dbg(req->hdev, "Resuming advertising instances");
995 if (ext_adv_capable(req->hdev)) {
996 /* Call for each tracked instance to be re-enabled */
997 list_for_each_entry(adv, &req->hdev->adv_instances, list) {
998 __hci_req_enable_ext_advertising(req,
1003 /* Schedule for most recent instance to be restarted and begin
1004 * the software rotation loop
1006 __hci_req_schedule_adv_instance(req,
1007 req->hdev->cur_adv_instance,
1012 /* This function requires the caller holds hdev->lock */
1013 int hci_req_resume_adv_instances(struct hci_dev *hdev)
1015 struct hci_request req;
1017 hci_req_init(&req, hdev);
1018 __hci_req_resume_adv_instances(&req);
1020 return hci_req_run(&req, NULL);
1023 static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1025 bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
1027 if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
1028 test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
1029 clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
1030 clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1031 wake_up(&hdev->suspend_wait_q);
1034 if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
1035 clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1036 wake_up(&hdev->suspend_wait_q);
1040 static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
1043 struct hci_dev *hdev = req->hdev;
1045 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1046 case HCI_ADV_MONITOR_EXT_MSFT:
1056 /* No need to block when enabling since it's on resume path */
1057 if (hdev->suspended && suspending)
1058 set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
1061 /* Call with hci_dev_lock */
1062 void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
1065 struct hci_conn *conn;
1066 struct hci_request req;
1068 int disconnect_counter;
1070 if (next == hdev->suspend_state) {
1071 bt_dev_dbg(hdev, "Same state before and after: %d", next);
1075 hdev->suspend_state = next;
1076 hci_req_init(&req, hdev);
1078 if (next == BT_SUSPEND_DISCONNECT) {
1079 /* Mark device as suspended */
1080 hdev->suspended = true;
1082 /* Pause discovery if not already stopped */
1083 old_state = hdev->discovery.state;
1084 if (old_state != DISCOVERY_STOPPED) {
1085 set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
1086 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1087 queue_work(hdev->req_workqueue, &hdev->discov_update);
1090 hdev->discovery_paused = true;
1091 hdev->discovery_old_state = old_state;
1093 /* Stop directed advertising */
1094 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
1096 set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
1097 cancel_delayed_work(&hdev->discov_off);
1098 queue_delayed_work(hdev->req_workqueue,
1099 &hdev->discov_off, 0);
1102 /* Pause other advertisements */
1103 if (hdev->adv_instance_cnt)
1104 __hci_req_pause_adv_instances(&req);
1106 hdev->advertising_paused = true;
1107 hdev->advertising_old_state = old_state;
1109 /* Disable page scan if enabled */
1110 if (test_bit(HCI_PSCAN, &hdev->flags)) {
1111 page_scan = SCAN_DISABLED;
1112 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
1114 set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
1117 /* Disable LE passive scan if enabled */
1118 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
1119 cancel_interleave_scan(hdev);
1120 hci_req_add_le_scan_disable(&req, false);
1123 /* Disable advertisement filters */
1124 hci_req_prepare_adv_monitor_suspend(&req, true);
1126 /* Prevent disconnects from causing scanning to be re-enabled */
1127 hdev->scanning_paused = true;
1129 /* Run commands before disconnecting */
1130 hci_req_run(&req, suspend_req_complete);
1132 disconnect_counter = 0;
1133 /* Soft disconnect everything (power off) */
1134 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1135 hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
1136 disconnect_counter++;
1139 if (disconnect_counter > 0) {
1141 "Had %d disconnects. Will wait on them",
1142 disconnect_counter);
1143 set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
1145 } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
1146 /* Unpause to take care of updating scanning params */
1147 hdev->scanning_paused = false;
1148 /* Enable event filter for paired devices */
1149 hci_req_set_event_filter(&req);
1150 /* Enable passive scan at lower duty cycle */
1151 __hci_update_background_scan(&req);
1152 /* Pause scan changes again. */
1153 hdev->scanning_paused = true;
1154 hci_req_run(&req, suspend_req_complete);
1156 hdev->suspended = false;
1157 hdev->scanning_paused = false;
1159 /* Clear any event filters and restore scan state */
1160 hci_req_clear_event_filter(&req);
1161 __hci_req_update_scan(&req);
1163 /* Reset passive/background scanning to normal */
1164 __hci_update_background_scan(&req);
1165 /* Enable all of the advertisement filters */
1166 hci_req_prepare_adv_monitor_suspend(&req, false);
1168 /* Unpause directed advertising */
1169 hdev->advertising_paused = false;
1170 if (hdev->advertising_old_state) {
1171 set_bit(SUSPEND_UNPAUSE_ADVERTISING,
1172 hdev->suspend_tasks);
1173 hci_dev_set_flag(hdev, HCI_ADVERTISING);
1174 queue_work(hdev->req_workqueue,
1175 &hdev->discoverable_update);
1176 hdev->advertising_old_state = 0;
1179 /* Resume other advertisements */
1180 if (hdev->adv_instance_cnt)
1181 __hci_req_resume_adv_instances(&req);
1183 /* Unpause discovery */
1184 hdev->discovery_paused = false;
1185 if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
1186 hdev->discovery_old_state != DISCOVERY_STOPPING) {
1187 set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
1188 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
1189 queue_work(hdev->req_workqueue, &hdev->discov_update);
1192 hci_req_run(&req, suspend_req_complete);
1195 hdev->suspend_state = next;
1198 clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
1199 wake_up(&hdev->suspend_wait_q);
1202 static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
1204 return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
1207 void __hci_req_disable_advertising(struct hci_request *req)
1209 if (ext_adv_capable(req->hdev)) {
1210 __hci_req_disable_ext_adv_instance(req, 0x00);
1215 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Decide whether advertising should use a Resolvable Private Address,
 * based on the HCI_PRIVACY/HCI_LIMITED_PRIVACY device flags and the
 * advertising instance @flags (MGMT_ADV_FLAG_*).
 */
1219 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1221 /* If privacy is not enabled don't use RPA */
1222 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1225 /* If basic privacy mode is enabled use RPA */
1226 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1229 /* If limited privacy mode is enabled don't use RPA if we're
1230 * both discoverable and bondable.
1232 if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1233 hci_dev_test_flag(hdev, HCI_BONDABLE))
1236 /* We're neither bondable nor discoverable in the limited
1237 * privacy mode, therefore use RPA.
/* Check whether advertising (connectable per @connectable) is allowed
 * given the existing LE connections, by testing the controller's
 * supported-LE-states bitmap (hdev->le_states) for the relevant
 * combined advertising+connection states.
 */
1242 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
1244 /* If there is no connection we are OK to advertise. */
1245 if (hci_conn_num(hdev, LE_LINK) == 0)
1248 /* Check le_states if there is any connection in peripheral role. */
1249 if (hdev->conn_hash.le_num_peripheral > 0) {
1250 /* Peripheral connection state and non connectable mode bit 20.
1252 if (!connectable && !(hdev->le_states[2] & 0x10))
1255 /* Peripheral connection state and connectable mode bit 38
1256 * and scannable bit 21.
1258 if (connectable && (!(hdev->le_states[4] & 0x40) ||
1259 !(hdev->le_states[2] & 0x20)))
1263 /* Check le_states if there is any connection in central role. */
1264 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
1265 /* Central connection state and non connectable mode bit 18. */
1266 if (!connectable && !(hdev->le_states[2] & 0x02))
1269 /* Central connection state and connectable mode bit 35 and
1272 if (connectable && (!(hdev->le_states[4] & 0x08) ||
1273 !(hdev->le_states[2] & 0x08)))
/* Queue legacy LE advertising parameter + enable commands onto @req for
 * the current advertising instance.  Picks the advertising type
 * (ADV_IND / ADV_SCAN_IND / ADV_NONCONN_IND), interval and own-address
 * type from the instance flags and global device settings.  Bails out
 * silently when advertising is not allowed or the random address
 * cannot be updated.
 */
1280 void __hci_req_enable_advertising(struct hci_request *req)
1282 struct hci_dev *hdev = req->hdev;
1283 struct adv_info *adv;
1284 struct hci_cp_le_set_adv_param cp;
1285 u8 own_addr_type, enable = 0x01;
1287 u16 adv_min_interval, adv_max_interval;
1290 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1291 adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1293 /* If the "connectable" instance flag was not set, then choose between
1294 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1296 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1297 mgmt_get_connectable(hdev);
1299 if (!is_advertising_allowed(hdev, connectable))
1302 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1303 __hci_req_disable_advertising(req);
1305 /* Clear the HCI_LE_ADV bit temporarily so that the
1306 * hci_update_random_address knows that it's safe to go ahead
1307 * and write a new random address. The flag will be set back on
1308 * as soon as the SET_ADV_ENABLE HCI command completes.
1310 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1312 /* Set require_privacy to true only when non-connectable
1313 * advertising is used. In that case it is fine to use a
1314 * non-resolvable private address.
1316 if (hci_update_random_address(req, !connectable,
1317 adv_use_rpa(hdev, flags),
1318 &own_addr_type) < 0)
1321 memset(&cp, 0, sizeof(cp));
/* Per-instance intervals take precedence over the device defaults. */
1324 adv_min_interval = adv->min_interval;
1325 adv_max_interval = adv->max_interval;
1327 adv_min_interval = hdev->le_adv_min_interval;
1328 adv_max_interval = hdev->le_adv_max_interval;
1332 cp.type = LE_ADV_IND;
1334 if (adv_cur_instance_is_scannable(hdev))
1335 cp.type = LE_ADV_SCAN_IND;
1337 cp.type = LE_ADV_NONCONN_IND;
/* Non-discoverable or limited-discoverable: use the fast discovery
 * advertising interval range instead.
 */
1339 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1340 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1341 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1342 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1346 cp.min_interval = cpu_to_le16(adv_min_interval);
1347 cp.max_interval = cpu_to_le16(adv_max_interval);
1348 cp.own_address_type = own_addr_type;
1349 cp.channel_map = hdev->le_adv_channel_map;
1351 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1353 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue a command to update the scan response data for @instance,
 * choosing the extended or legacy variant per controller capability.
 * Skips the command (and the cached copy update) when the newly built
 * data is identical to hdev->scan_rsp_data.
 */
1356 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1358 struct hci_dev *hdev = req->hdev;
1361 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1364 if (ext_adv_capable(hdev)) {
1366 struct hci_cp_le_set_ext_scan_rsp_data cp;
1367 u8 data[HCI_MAX_EXT_AD_LENGTH];
1370 memset(&pdu, 0, sizeof(pdu));
1372 len = eir_create_scan_rsp(hdev, instance, pdu.data);
/* Nothing to do if the data hasn't changed since last time. */
1374 if (hdev->scan_rsp_data_len == len &&
1375 !memcmp(pdu.data, hdev->scan_rsp_data, len))
1378 memcpy(hdev->scan_rsp_data, pdu.data, len);
1379 hdev->scan_rsp_data_len = len;
1381 pdu.cp.handle = instance;
1382 pdu.cp.length = len;
1383 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1384 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1386 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1387 sizeof(pdu.cp) + len, &pdu.cp);
1389 struct hci_cp_le_set_scan_rsp_data cp;
1391 memset(&cp, 0, sizeof(cp));
1393 len = eir_create_scan_rsp(hdev, instance, cp.data);
1395 if (hdev->scan_rsp_data_len == len &&
1396 !memcmp(cp.data, hdev->scan_rsp_data, len))
1399 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1400 hdev->scan_rsp_data_len = len;
1404 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
/* Queue a command to update the advertising data for @instance,
 * choosing the extended or legacy variant per controller capability.
 * Mirrors __hci_req_update_scan_rsp_data(): the cached copy in
 * hdev->adv_data is used to suppress redundant updates.
 */
1408 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1410 struct hci_dev *hdev = req->hdev;
1413 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1416 if (ext_adv_capable(hdev)) {
1418 struct hci_cp_le_set_ext_adv_data cp;
1419 u8 data[HCI_MAX_EXT_AD_LENGTH];
1422 memset(&pdu, 0, sizeof(pdu));
1424 len = eir_create_adv_data(hdev, instance, pdu.data);
1426 /* There's nothing to do if the data hasn't changed */
1427 if (hdev->adv_data_len == len &&
1428 memcmp(pdu.data, hdev->adv_data, len) == 0)
1431 memcpy(hdev->adv_data, pdu.data, len);
1432 hdev->adv_data_len = len;
1434 pdu.cp.length = len;
1435 pdu.cp.handle = instance;
1436 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1437 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1439 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1440 sizeof(pdu.cp) + len, &pdu.cp);
1442 struct hci_cp_le_set_adv_data cp;
1444 memset(&cp, 0, sizeof(cp));
1446 len = eir_create_adv_data(hdev, instance, cp.data);
1448 /* There's nothing to do if the data hasn't changed */
1449 if (hdev->adv_data_len == len &&
1450 memcmp(cp.data, hdev->adv_data, len) == 0)
1453 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1454 hdev->adv_data_len = len;
1458 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
/* Standalone wrapper: build and run a one-shot request that updates the
 * advertising data for @instance.  Returns the hci_req_run() result.
 */
1462 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1464 struct hci_request req;
1466 hci_req_init(&req, hdev);
1467 __hci_req_update_adv_data(&req, instance);
1469 return hci_req_run(&req, NULL);
/* Request-complete callback for the address resolution enable/disable
 * request: only logs the command status.
 */
1472 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1475 BT_DBG("%s status %u", hdev->name, status);
/* Build and run a request that disables controller-based LE address
 * resolution.  No-op unless LL privacy is in use or the
 * HCI_LL_RPA_RESOLUTION flag is set.
 */
1478 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1480 struct hci_request req;
1483 if (!use_ll_privacy(hdev) &&
1484 !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1487 hci_req_init(&req, hdev);
1489 hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1491 hci_req_run(&req, enable_addr_resolution_complete);
/* Request-complete callback for re-enabling advertising: logs status. */
1494 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1496 bt_dev_dbg(hdev, "status %u", status);
/* Re-enable advertising after it was implicitly stopped (e.g. by a
 * connection).  If an advertising instance is current, reschedule it;
 * otherwise restart instance 0x00 via the ext-adv or legacy path.
 * No-op when neither HCI_ADVERTISING is set nor instances exist.
 */
1499 void hci_req_reenable_advertising(struct hci_dev *hdev)
1501 struct hci_request req;
1503 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1504 list_empty(&hdev->adv_instances))
1507 hci_req_init(&req, hdev);
1509 if (hdev->cur_adv_instance) {
1510 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1513 if (ext_adv_capable(hdev)) {
1514 __hci_req_start_ext_adv(&req, 0x00);
1516 __hci_req_update_adv_data(&req, 0x00);
1517 __hci_req_update_scan_rsp_data(&req, 0x00);
1518 __hci_req_enable_advertising(&req);
1522 hci_req_run(&req, adv_enable_complete);
/* Delayed-work handler fired when the current advertising instance's
 * timeout expires: clears the instance (without force) and disables
 * advertising if no instances remain.  Runs under hci_dev_lock
 * (the unlock is visible at the end).
 */
1525 static void adv_timeout_expire(struct work_struct *work)
1527 struct hci_dev *hdev = container_of(work, struct hci_dev,
1528 adv_instance_expire.work);
1530 struct hci_request req;
1533 bt_dev_dbg(hdev, "");
1537 hdev->adv_instance_timeout = 0;
1539 instance = hdev->cur_adv_instance;
1540 if (instance == 0x00)
1543 hci_req_init(&req, hdev);
1545 hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1547 if (list_empty(&hdev->adv_instances))
1548 __hci_req_disable_advertising(&req);
1550 hci_req_run(&req, NULL);
1553 hci_dev_unlock(hdev);
/* Request builder for one step of interleaved scanning: restart passive
 * scan (disabling it first if active) and advance
 * hdev->interleave_scan_state between allowlist and no-filter phases.
 * INTERLEAVE_SCAN_NONE here is unexpected and logged as an error.
 */
1556 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1559 struct hci_dev *hdev = req->hdev;
1564 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1565 hci_req_add_le_scan_disable(req, false);
1566 hci_req_add_le_passive_scan(req);
1568 switch (hdev->interleave_scan_state) {
1569 case INTERLEAVE_SCAN_ALLOWLIST:
1570 bt_dev_dbg(hdev, "next state: allowlist");
1571 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1573 case INTERLEAVE_SCAN_NO_FILTER:
1574 bt_dev_dbg(hdev, "next state: no filter");
1575 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1577 case INTERLEAVE_SCAN_NONE:
1578 BT_ERR("unexpected error");
1582 hci_dev_unlock(hdev);
/* Delayed-work handler driving interleaved scanning: runs one
 * hci_req_add_le_interleaved_scan() step synchronously and, if
 * interleaving is still active, requeues itself with the duration of
 * the phase that was just entered.
 */
1589 static void interleave_scan_work(struct work_struct *work)
1590 struct hci_dev *hdev = container_of(work, struct hci_dev,
1590 interleave_scan.work);
1592 unsigned long timeout;
1594 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1595 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1596 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1597 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1599 bt_dev_err(hdev, "unexpected error");
1603 hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1604 HCI_CMD_TIMEOUT, &status);
1606 /* Don't continue interleaving if it was canceled */
1607 if (is_interleave_scanning(hdev))
1608 queue_delayed_work(hdev->req_workqueue,
1609 &hdev->interleave_scan, timeout);
/* Resolve the own-address type and (if needed) a random address to use
 * for advertising, without queueing any HCI command.  Privacy enabled:
 * returns an RPA (regenerating it via smp_generate_rpa() when expired).
 * require_privacy without RPA: returns a freshly generated
 * non-resolvable private address.  Otherwise: public address.
 * @rand_addr is set to BDADDR_ANY when no random address applies.
 */
1612 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1613 bool use_rpa, struct adv_info *adv_instance,
1614 u8 *own_addr_type, bdaddr_t *rand_addr)
1618 bacpy(rand_addr, BDADDR_ANY);
1620 /* If privacy is enabled use a resolvable private address. If
1621 * current RPA has expired then generate a new one.
1624 /* If Controller supports LL Privacy use own address type is
1627 if (use_ll_privacy(hdev) &&
1628 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
1629 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1631 *own_addr_type = ADDR_LE_DEV_RANDOM;
1634 if (adv_rpa_valid(adv_instance))
1637 if (rpa_valid(hdev))
1641 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1643 bt_dev_err(hdev, "failed to generate new RPA");
1647 bacpy(rand_addr, &hdev->rpa);
1652 /* In case of required privacy without resolvable private address,
1653 * use an non-resolvable private address. This is useful for
1654 * non-connectable advertising.
1656 if (require_privacy) {
1660 /* The non-resolvable private address is generated
1661 * from random six bytes with the two most significant
1664 get_random_bytes(&nrpa, 6);
1667 /* The non-resolvable private address shall not be
1668 * equal to the public address.
1670 if (bacmp(&hdev->bdaddr, &nrpa))
1674 *own_addr_type = ADDR_LE_DEV_RANDOM;
1675 bacpy(rand_addr, &nrpa);
1680 /* No privacy so use a public address. */
1681 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Queue a command clearing all extended advertising sets on the
 * controller.
 */
1686 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1688 hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
/* Queue LE_SET_RANDOM_ADDR with @rpa, unless advertising or an LE
 * connection attempt is in progress — in that case defer by marking
 * the RPA expired so the update happens next cycle.
 */
1691 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1693 struct hci_dev *hdev = req->hdev;
1695 /* If we're advertising or initiating an LE connection we can't
1696 * go ahead and change the random address at this time. This is
1697 * because the eventual initiator address used for the
1698 * subsequently created connection will be undefined (some
1699 * controllers use the new address and others the one we had
1700 * when the operation started).
1702 * In this kind of scenario skip the update and let the random
1703 * address be updated at the next cycle.
1705 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1706 hci_lookup_le_connect(hdev)) {
1707 bt_dev_dbg(hdev, "Deferring random address update");
1708 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1712 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
/* Queue LE_SET_EXT_ADV_PARAMS for @instance: picks event properties
 * (connectable/scannable/non-connectable, legacy vs extended), PHYs,
 * intervals, tx power and own-address type, and queues a follow-up
 * random-address update when a random own address is in use.
 * Returns a negative error when advertising is not allowed or no
 * random address could be obtained.
 */
1715 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1717 struct hci_cp_le_set_ext_adv_params cp;
1718 struct hci_dev *hdev = req->hdev;
1721 bdaddr_t random_addr;
1724 struct adv_info *adv_instance;
1728 adv_instance = hci_find_adv_instance(hdev, instance);
1732 adv_instance = NULL;
1735 flags = hci_adv_instance_flags(hdev, instance);
1737 /* If the "connectable" instance flag was not set, then choose between
1738 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1740 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1741 mgmt_get_connectable(hdev);
1743 if (!is_advertising_allowed(hdev, connectable))
1746 /* Set require_privacy to true only when non-connectable
1747 * advertising is used. In that case it is fine to use a
1748 * non-resolvable private address.
1750 err = hci_get_random_address(hdev, !connectable,
1751 adv_use_rpa(hdev, flags), adv_instance,
1752 &own_addr_type, &random_addr);
1756 memset(&cp, 0, sizeof(cp));
/* Per-instance interval/tx-power if an instance exists, else device
 * defaults.
 */
1759 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1760 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1761 cp.tx_power = adv_instance->tx_power;
1763 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1764 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1765 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1768 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1772 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1774 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1775 } else if (hci_adv_instance_is_scannable(hdev, instance) ||
1776 (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1778 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1780 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1783 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1785 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1788 cp.own_addr_type = own_addr_type;
1789 cp.channel_map = hdev->le_adv_channel_map;
1790 cp.handle = instance;
1792 if (flags & MGMT_ADV_FLAG_SEC_2M) {
1793 cp.primary_phy = HCI_ADV_PHY_1M;
1794 cp.secondary_phy = HCI_ADV_PHY_2M;
1795 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1796 cp.primary_phy = HCI_ADV_PHY_CODED;
1797 cp.secondary_phy = HCI_ADV_PHY_CODED;
1799 /* In all other cases use 1M */
1800 cp.primary_phy = HCI_ADV_PHY_1M;
1801 cp.secondary_phy = HCI_ADV_PHY_1M;
1804 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1806 if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1807 bacmp(&random_addr, BDADDR_ANY)) {
1808 struct hci_cp_le_set_adv_set_rand_addr cp;
1810 /* Check if random address need to be updated */
1812 if (!bacmp(&random_addr, &adv_instance->random_addr))
1815 if (!bacmp(&random_addr, &hdev->random_addr))
1817 /* Instance 0x00 doesn't have an adv_info, instead it
1818 * uses hdev->random_addr to track its address so
1819 * whenever it needs to be updated this also set the
1820 * random address since hdev->random_addr is shared with
1821 * scan state machine.
1823 set_random_addr(req, &random_addr);
1826 memset(&cp, 0, sizeof(cp));
1828 cp.handle = instance;
1829 bacpy(&cp.bdaddr, &random_addr);
1832 HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
/* Queue LE_SET_EXT_ADV_ENABLE enabling the single set @instance.
 * When the instance has a duration, the per-set duration field is
 * filled in (units of 10 ms) so the controller stops it on its own.
 */
1839 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1841 struct hci_dev *hdev = req->hdev;
1842 struct hci_cp_le_set_ext_adv_enable *cp;
1843 struct hci_cp_ext_adv_set *adv_set;
1844 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1845 struct adv_info *adv_instance;
1848 adv_instance = hci_find_adv_instance(hdev, instance);
1852 adv_instance = NULL;
1856 adv_set = (void *) cp->data;
1858 memset(cp, 0, sizeof(*cp));
1861 cp->num_of_sets = 0x01;
1863 memset(adv_set, 0, sizeof(*adv_set));
1865 adv_set->handle = instance;
1867 /* Set duration per instance since controller is responsible for
1870 if (adv_instance && adv_instance->duration) {
1871 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1873 /* Time = N * 10 ms */
1874 adv_set->duration = cpu_to_le16(duration / 10);
1877 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1878 sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
/* Queue LE_SET_EXT_ADV_ENABLE (disable) for @instance.  Instance 0x00
 * means "all sets" and is encoded as num_of_sets == 0.  Fails when a
 * non-zero instance does not exist.
 */
1886 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1887 struct hci_dev *hdev = req->hdev;
1887 struct hci_cp_le_set_ext_adv_enable *cp;
1888 struct hci_cp_ext_adv_set *adv_set;
1889 u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1892 /* If request specifies an instance that doesn't exist, fail */
1893 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1896 memset(data, 0, sizeof(data));
1899 adv_set = (void *)cp->data;
1901 /* Instance 0x00 indicates all advertising instances will be disabled */
1902 cp->num_of_sets = !!instance;
1905 adv_set->handle = instance;
1907 req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1908 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
/* Queue LE_REMOVE_ADV_SET removing advertising set @instance from the
 * controller.  Fails when a non-zero instance does not exist.
 */
1915 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1915 struct hci_dev *hdev = req->hdev;
1917 /* If request specifies an instance that doesn't exist, fail */
1918 if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1921 hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
/* (Re)start extended advertising for @instance: disable it first when
 * the controller already knows about it (not pending), then set up
 * parameters, refresh scan response data and enable the set.
 */
1926 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1928 struct hci_dev *hdev = req->hdev;
1929 struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1932 /* If instance isn't pending, the chip knows about it, and it's safe to
1935 if (adv_instance && !adv_instance->pending)
1936 __hci_req_disable_ext_adv_instance(req, instance);
1938 err = __hci_req_setup_ext_adv_instance(req, instance);
1942 __hci_req_update_scan_rsp_data(req, instance);
1943 __hci_req_enable_ext_advertising(req, instance);
/* Make @instance the current advertising instance and queue the
 * commands to advertise it.  Computes the effective timeout from the
 * instance duration and remaining lifetime; for legacy (non-ext-adv)
 * controllers the expiry is driven by the adv_instance_expire delayed
 * work instead of the controller.  Skips HCI traffic when re-scheduling
 * the already-active instance without @force.
 */
1948 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1951 struct hci_dev *hdev = req->hdev;
1952 struct adv_info *adv_instance = NULL;
1955 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1956 list_empty(&hdev->adv_instances))
1959 if (hdev->adv_instance_timeout)
1962 adv_instance = hci_find_adv_instance(hdev, instance);
1966 /* A zero timeout means unlimited advertising. As long as there is
1967 * only one instance, duration should be ignored. We still set a timeout
1968 * in case further instances are being added later on.
1970 * If the remaining lifetime of the instance is more than the duration
1971 * then the timeout corresponds to the duration, otherwise it will be
1972 * reduced to the remaining instance lifetime.
1974 if (adv_instance->timeout == 0 ||
1975 adv_instance->duration <= adv_instance->remaining_time)
1976 timeout = adv_instance->duration;
1978 timeout = adv_instance->remaining_time;
1980 /* The remaining time is being reduced unless the instance is being
1981 * advertised without time limit.
1983 if (adv_instance->timeout)
1984 adv_instance->remaining_time =
1985 adv_instance->remaining_time - timeout;
1987 /* Only use work for scheduling instances with legacy advertising */
1988 if (!ext_adv_capable(hdev)) {
1989 hdev->adv_instance_timeout = timeout;
1990 queue_delayed_work(hdev->req_workqueue,
1991 &hdev->adv_instance_expire,
1992 msecs_to_jiffies(timeout * 1000));
1995 /* If we're just re-scheduling the same instance again then do not
1996 * execute any HCI commands. This happens when a single instance is
1999 if (!force && hdev->cur_adv_instance == instance &&
2000 hci_dev_test_flag(hdev, HCI_LE_ADV))
2003 hdev->cur_adv_instance = instance;
2004 if (ext_adv_capable(hdev)) {
2005 __hci_req_start_ext_adv(req, instance);
2007 __hci_req_update_adv_data(req, instance);
2008 __hci_req_update_scan_rsp_data(req, instance);
2009 __hci_req_enable_advertising(req);
2015 /* For a single instance:
2016 * - force == true: The instance will be removed even when its remaining
2017 * lifetime is not zero.
2018 * - force == false: the instance will be deactivated but kept stored unless
2019 * the remaining lifetime is zero.
2021 * For instance == 0x00:
2022 * - force == true: All instances will be removed regardless of their timeout
2024 * - force == false: Only instances that have a timeout will be removed.
/* Remove/deactivate advertising instance(s) per the rules above and,
 * when @req is given and the device is powered without HCI_ADVERTISING,
 * schedule the next instance (legacy controllers only).  @sk is used
 * for mgmt "advertising removed" notifications and may be NULL.
 */
2026 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2027 struct hci_request *req, u8 instance,
2030 struct adv_info *adv_instance, *n, *next_instance = NULL;
2034 /* Cancel any timeout concerning the removed instance(s). */
2035 if (!instance || hdev->cur_adv_instance == instance)
2036 cancel_adv_timeout(hdev);
2038 /* Get the next instance to advertise BEFORE we remove
2039 * the current one. This can be the same instance again
2040 * if there is only one instance.
2042 if (instance && hdev->cur_adv_instance == instance)
2043 next_instance = hci_get_next_instance(hdev, instance);
2045 if (instance == 0x00) {
2046 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2048 if (!(force || adv_instance->timeout))
2051 rem_inst = adv_instance->instance;
2052 err = hci_remove_adv_instance(hdev, rem_inst);
2054 mgmt_advertising_removed(sk, hdev, rem_inst);
2057 adv_instance = hci_find_adv_instance(hdev, instance);
2059 if (force || (adv_instance && adv_instance->timeout &&
2060 !adv_instance->remaining_time)) {
2061 /* Don't advertise a removed instance. */
2062 if (next_instance &&
2063 next_instance->instance == instance)
2064 next_instance = NULL;
2066 err = hci_remove_adv_instance(hdev, instance);
2068 mgmt_advertising_removed(sk, hdev, instance);
2072 if (!req || !hdev_is_powered(hdev) ||
2073 hci_dev_test_flag(hdev, HCI_ADVERTISING))
2076 if (next_instance && !ext_adv_capable(hdev))
2077 __hci_req_schedule_adv_instance(req, next_instance->instance,
/* Decide the own-address type for scanning/connecting and queue any
 * random-address update needed.  Privacy: RPA (regenerated if expired);
 * require_privacy without RPA: non-resolvable private address; forced
 * static address / no public address: static address; otherwise the
 * public address.  Returns 0 on success, negative error on RPA
 * generation failure.
 */
2081 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2082 bool use_rpa, u8 *own_addr_type)
2084 struct hci_dev *hdev = req->hdev;
2087 /* If privacy is enabled use a resolvable private address. If
2088 * current RPA has expired or there is something else than
2089 * the current RPA in use, then generate a new one.
2092 /* If Controller supports LL Privacy use own address type is
2095 if (use_ll_privacy(hdev) &&
2096 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2097 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2099 *own_addr_type = ADDR_LE_DEV_RANDOM;
2101 if (rpa_valid(hdev))
2104 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2106 bt_dev_err(hdev, "failed to generate new RPA");
2110 set_random_addr(req, &hdev->rpa);
2115 /* In case of required privacy without resolvable private address,
2116 * use an non-resolvable private address. This is useful for active
2117 * scanning and non-connectable advertising.
2119 if (require_privacy) {
2123 /* The non-resolvable private address is generated
2124 * from random six bytes with the two most significant
2127 get_random_bytes(&nrpa, 6);
2130 /* The non-resolvable private address shall not be
2131 * equal to the public address.
2133 if (bacmp(&hdev->bdaddr, &nrpa))
2137 *own_addr_type = ADDR_LE_DEV_RANDOM;
2138 set_random_addr(req, &nrpa);
2142 /* If forcing static address is in use or there is no public
2143 * address use the static address as random address (but skip
2144 * the HCI command if the current random address is already the
2147 * In case BR/EDR has been disabled on a dual-mode controller
2148 * and a static address has been configured, then use that
2149 * address instead of the public BR/EDR address.
2151 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2152 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2153 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2154 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2155 *own_addr_type = ADDR_LE_DEV_RANDOM;
2156 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2157 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2158 &hdev->static_addr);
2162 /* Neither privacy nor static address is being used so use a
2165 *own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Return true if any accept-list entry has an ACL connection that is
 * neither BT_CONNECTED nor BT_CONFIG (i.e. effectively disconnected),
 * meaning page scan should stay enabled so it can reconnect.
 */
2170 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2172 struct bdaddr_list *b;
2174 list_for_each_entry(b, &hdev->accept_list, list) {
2175 struct hci_conn *conn;
2177 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2181 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
/* Queue WRITE_SCAN_ENABLE reflecting the current connectable and
 * discoverable settings.  No-op when BR/EDR is disabled, the device is
 * unpowered or powering down, scanning is paused, or the HCI_PSCAN /
 * HCI_ISCAN flags already match the desired value.
 */
2188 void __hci_req_update_scan(struct hci_request *req)
2190 struct hci_dev *hdev = req->hdev;
2193 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2196 if (!hdev_is_powered(hdev))
2199 if (mgmt_powering_down(hdev))
2202 if (hdev->scanning_paused)
2205 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2206 disconnected_accept_list_entries(hdev))
2209 scan = SCAN_DISABLED;
2211 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2212 scan |= SCAN_INQUIRY;
2214 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2215 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2218 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* hci_req_sync() callback: run __hci_req_update_scan() under the
 * device lock.
 */
2221 static int update_scan(struct hci_request *req, unsigned long opt)
2223 hci_dev_lock(req->hdev);
2224 __hci_req_update_scan(req);
2225 hci_dev_unlock(req->hdev);
/* Work handler: synchronously refresh the page/inquiry scan setting. */
2231 static void scan_update_work(struct work_struct *work)
2231 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2233 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
/* hci_req_sync() callback run after the connectable setting changed:
 * refresh scan enable, advertising data/parameters and background scan
 * to match the new setting.
 */
2236 static int connectable_update(struct hci_request *req, unsigned long opt)
2238 struct hci_dev *hdev = req->hdev;
2242 __hci_req_update_scan(req);
2244 /* If BR/EDR is not enabled and we disable advertising as a
2245 * by-product of disabling connectable, we need to update the
2246 * advertising flags.
2248 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2249 __hci_req_update_adv_data(req, hdev->cur_adv_instance)
2251 /* Update the advertising parameters if necessary */
2252 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2253 !list_empty(&hdev->adv_instances)) {
2254 if (ext_adv_capable(hdev))
2255 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2257 __hci_req_enable_advertising(req);
2260 __hci_update_background_scan(req);
2262 hci_dev_unlock(hdev);
/* Work handler: run connectable_update() synchronously and report the
 * resulting status to mgmt.
 */
2269 static void connectable_update_work(struct work_struct *work)
2269 struct hci_dev *hdev = container_of(work, struct hci_dev,
2270 connectable_update);
2273 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2274 mgmt_set_connectable_complete(hdev, status);
/* OR together the service-class hint bits of every registered UUID to
 * form the service-class byte of the Class of Device.
 */
2277 static u8 get_service_classes(struct hci_dev *hdev)
2279 struct bt_uuid *uuid;
2282 list_for_each_entry(uuid, &hdev->uuids, list)
2283 val |= uuid->svc_hint;
/* Queue WRITE_CLASS_OF_DEV with the current minor/major class and
 * service-class bits.  Skipped when unpowered, BR/EDR disabled, the
 * service cache is active, or the class is unchanged.
 */
2290 void __hci_req_update_class(struct hci_request *req)
2290 struct hci_dev *hdev = req->hdev;
2293 bt_dev_dbg(hdev, "");
2295 if (!hdev_is_powered(hdev))
2298 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2301 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2304 cod[0] = hdev->minor_class;
2305 cod[1] = hdev->major_class;
2306 cod[2] = get_service_classes(hdev);
2308 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2311 if (memcmp(cod, hdev->dev_class, 3) == 0)
2314 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Queue WRITE_CURRENT_IAC_LAP with the LIAC (+GIAC) in limited
 * discoverable mode, or just the GIAC in general discoverable mode.
 * The LAP byte values (0x9e8b00 / 0x9e8b33) are the Bluetooth-assigned
 * limited/general inquiry access codes, little-endian.
 */
2317 static void write_iac(struct hci_request *req)
2319 struct hci_dev *hdev = req->hdev;
2320 struct hci_cp_write_current_iac_lap cp;
2322 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2325 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2326 /* Limited discoverable mode */
2327 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2328 cp.iac_lap[0] = 0x00; /* LIAC */
2329 cp.iac_lap[1] = 0x8b;
2330 cp.iac_lap[2] = 0x9e;
2331 cp.iac_lap[3] = 0x33; /* GIAC */
2332 cp.iac_lap[4] = 0x8b;
2333 cp.iac_lap[5] = 0x9e;
2335 /* General discoverable mode */
2337 cp.iac_lap[0] = 0x33; /* GIAC */
2338 cp.iac_lap[1] = 0x8b;
2339 cp.iac_lap[2] = 0x9e;
2342 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2343 (cp.num_iac * 3) + 1, &cp);
/* hci_req_sync() callback run after the discoverable setting changed:
 * refresh IAC, scan enable and device class on BR/EDR, and advertising
 * data (and, under limited privacy, the advertising enable) on LE.
 */
2346 static int discoverable_update(struct hci_request *req, unsigned long opt)
2348 struct hci_dev *hdev = req->hdev;
2352 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2354 __hci_req_update_scan(req);
2355 __hci_req_update_class(req);
2358 /* Advertising instances don't use the global discoverable setting, so
2359 * only update AD if advertising was enabled using Set Advertising.
2361 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2362 __hci_req_update_adv_data(req, 0x00);
2364 /* Discoverable mode affects the local advertising
2365 * address in limited privacy mode.
2367 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2368 if (ext_adv_capable(hdev))
2369 __hci_req_start_ext_adv(req, 0x00);
2371 __hci_req_enable_advertising(req);
2375 hci_dev_unlock(hdev);
/* Work handler: run discoverable_update() synchronously and report the
 * resulting status to mgmt.
 */
2382 static void discoverable_update_work(struct work_struct *work)
2382 struct hci_dev *hdev = container_of(work, struct hci_dev,
2383 discoverable_update);
2386 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2387 mgmt_set_discoverable_complete(hdev, status);
/* Queue the command that aborts @conn appropriately for its current
 * state: Disconnect (or Disconnect Physical Link for AMP) when
 * connected, Create Connection Cancel while connecting, or the proper
 * Reject command for an incoming ACL/SCO request.  Updates conn->state
 * to BT_DISCONN / BT_CLOSED accordingly.
 */
2390 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2393 switch (conn->state) {
2396 if (conn->type == AMP_LINK) {
2397 struct hci_cp_disconn_phy_link cp;
2399 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2401 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2404 struct hci_cp_disconnect dc;
2406 dc.handle = cpu_to_le16(conn->handle);
2408 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2411 conn->state = BT_DISCONN;
2415 if (conn->type == LE_LINK) {
2416 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2418 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2420 } else if (conn->type == ACL_LINK) {
2421 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2423 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2428 if (conn->type == ACL_LINK) {
2429 struct hci_cp_reject_conn_req rej;
2431 bacpy(&rej.bdaddr, &conn->dst);
2432 rej.reason = reason;
2434 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2436 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2437 struct hci_cp_reject_sync_conn_req rej;
2439 bacpy(&rej.bdaddr, &conn->dst);
2441 /* SCO rejection has its own limited set of
2442 * allowed error values (0x0D-0x0F) which isn't
2443 * compatible with most values passed to this
2444 * function. To be safe hard-code one of the
2445 * values that's suitable for SCO.
2447 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2449 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2454 conn->state = BT_CLOSED;
/* Request-complete callback for hci_abort_conn(): logs failures. */
2459 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2462 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
/* Build and run a request aborting @conn with @reason.  -ENODATA from
 * hci_req_run() (empty request) is not treated as a failure.
 */
2465 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2467 struct hci_request req;
2470 hci_req_init(&req, conn->hdev);
2472 __hci_abort_conn(&req, conn, reason);
2474 err = hci_req_run(&req, abort_conn_complete);
2475 if (err && err != -ENODATA) {
2476 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
/* hci_req_sync() callback: refresh background scanning under the
 * device lock.
 */
2485 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2485 hci_dev_lock(req->hdev);
2486 __hci_update_background_scan(req);
2487 hci_dev_unlock(req->hdev);
/* Work handler: refresh background scanning; on failure, fail the
 * pending LE connection attempt (if any) with the error status.
 */
2493 static void bg_scan_update(struct work_struct *work)
2493 struct hci_dev *hdev = container_of(work, struct hci_dev,
2495 struct hci_conn *conn;
2499 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2505 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2507 hci_le_conn_failed(conn, status);
2509 hci_dev_unlock(hdev);
/* hci_req_sync() callback: queue the LE scan disable command. */
2512 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2514 hci_req_add_le_scan_disable(req, false);
/* hci_req_sync() callback: flush the inquiry cache and queue an
 * HCI Inquiry using the LIAC when discovery is limited, otherwise the
 * GIAC.  No-op when an inquiry is already in progress.
 */
2518 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2521 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2522 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2523 struct hci_cp_inquiry cp;
2525 if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2528 bt_dev_dbg(req->hdev, "");
2530 hci_dev_lock(req->hdev);
2531 hci_inquiry_cache_flush(req->hdev);
2532 hci_dev_unlock(req->hdev);
2534 memset(&cp, 0, sizeof(cp));
2536 if (req->hdev->discovery.limited)
2537 memcpy(&cp.lap, liac, sizeof(cp.lap));
2539 memcpy(&cp.lap, giac, sizeof(cp.lap));
2543 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Delayed-work handler that stops a timed LE scan: cancels the restart
 * work, disables scanning, and then either stops discovery or — for
 * interleaved discovery with the simultaneous-discovery quirk —
 * kicks off the BR/EDR inquiry phase.
 */
2550 static void le_scan_disable_work(struct work_struct *work)
2550 struct hci_dev *hdev = container_of(work, struct hci_dev,
2551 le_scan_disable.work);
2554 bt_dev_dbg(hdev, "");
2556 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2559 cancel_delayed_work(&hdev->le_scan_restart);
2561 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2563 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2568 hdev->discovery.scan_start = 0;
2570 /* If we were running LE only scan, change discovery state. If
2571 * we were running both LE and BR/EDR inquiry simultaneously,
2572 * and BR/EDR inquiry is already finished, stop discovery,
2573 * otherwise BR/EDR inquiry will stop discovery when finished.
2574 * If we will resolve remote device name, do not change
2578 if (hdev->discovery.type == DISCOV_TYPE_LE)
2579 goto discov_stopped;
2581 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2584 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2585 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2586 hdev->discovery.state != DISCOVERY_RESOLVING)
2587 goto discov_stopped;
2592 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2593 HCI_CMD_TIMEOUT, &status);
2595 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2596 goto discov_stopped;
2603 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2604 hci_dev_unlock(hdev);
/* hci_req_sync() callback: restart LE scanning by queuing a disable
 * followed by an enable with duplicate filtering on. Used by controllers
 * with strict duplicate filtering so that previously-seen devices are
 * reported again. Chooses extended or legacy scan-enable commands based
 * on controller capabilities. No-op if scanning is off or paused for
 * suspend.
 */
2607 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2609 struct hci_dev *hdev = req->hdev;
2611 /* If controller is not scanning we are done. */
2612 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2615 if (hdev->scanning_paused) {
2616 bt_dev_dbg(hdev, "Scanning is paused for suspend");
/* Disable, then re-enable below — this resets the duplicate filter. */
2620 hci_req_add_le_scan_disable(req, false);
2622 if (use_ext_scan(hdev)) {
2623 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2625 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2626 ext_enable_cp.enable = LE_SCAN_ENABLE;
2627 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2629 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2630 sizeof(ext_enable_cp), &ext_enable_cp);
/* Legacy (non-extended) scan enable path. */
2632 struct hci_cp_le_set_scan_enable cp;
2634 memset(&cp, 0, sizeof(cp));
2635 cp.enable = LE_SCAN_ENABLE;
2636 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2637 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed work: restart the LE scan (see le_scan_restart()) and re-queue
 * the le_scan_disable work for the remainder of the original scan
 * duration, so the total scan time is unchanged by the restart. Only
 * relevant for controllers with HCI_QUIRK_STRICT_DUPLICATE_FILTER while
 * a discovery scan window is tracked (scan_start != 0).
 */
2643 static void le_scan_restart_work(struct work_struct *work)
2645 struct hci_dev *hdev = container_of(work, struct hci_dev,
2646 le_scan_restart.work);
2647 unsigned long timeout, duration, scan_start, now;
2650 bt_dev_dbg(hdev, "");
2652 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2654 bt_dev_err(hdev, "failed to restart LE scan: status %d",
/* Remaining duration only matters with strict duplicate filtering and a
 * recorded scan window. */
2661 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2662 !hdev->discovery.scan_start)
2665 /* When the scan was started, hdev->le_scan_disable has been queued
2666 * after duration from scan_start. During scan restart this job
2667 * has been canceled, and we need to queue it again after proper
2668 * timeout, to make sure that scan does not run indefinitely.
2670 duration = hdev->discovery.scan_duration;
2671 scan_start = hdev->discovery.scan_start;
/* NOTE(review): `now = jiffies;` assignment is elided from this excerpt. */
2673 if (now - scan_start <= duration) {
2676 if (now >= scan_start)
2677 elapsed = now - scan_start;
/* jiffies wrapped around since scan_start. */
2679 elapsed = ULONG_MAX - scan_start + now;
2681 timeout = duration - elapsed;
2686 queue_delayed_work(hdev->req_workqueue,
2687 &hdev->le_scan_disable, timeout);
2690 hci_dev_unlock(hdev);
/* hci_req_sync() callback: start an active LE scan for device discovery.
 * @opt carries the scan interval (truncated to u16).
 *
 * Stops any running background/interleave scan first, picks an own
 * address type via hci_update_random_address() (RPA when privacy is on,
 * otherwise per that helper's policy), and disables duplicate filtering
 * while advertisement monitors are active so monitors keep receiving
 * reports from the same peer.
 */
2693 static int active_scan(struct hci_request *req, unsigned long opt)
2695 uint16_t interval = opt;
2696 struct hci_dev *hdev = req->hdev;
2698 /* Accept list is not used for discovery */
2699 u8 filter_policy = 0x00;
2700 /* Default is to enable duplicates filter */
2701 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2702 /* Discovery doesn't require controller address resolution */
2703 bool addr_resolv = false;
2706 bt_dev_dbg(hdev, "");
2708 /* If controller is scanning, it means the background scanning is
2709 * running. Thus, we should temporarily stop it in order to set the
2710 * discovery scanning parameters.
2712 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2713 hci_req_add_le_scan_disable(req, false);
2714 cancel_interleave_scan(hdev);
2717 /* All active scans will be done with either a resolvable private
2718 * address (when privacy feature has been enabled) or non-resolvable
/* NOTE(review): the own_addr_type out-parameter and the error check on
 * err are elided from this excerpt; on failure own_addr_type falls back
 * to ADDR_LE_DEV_PUBLIC below. */
2721 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2724 own_addr_type = ADDR_LE_DEV_PUBLIC;
2726 if (hci_is_adv_monitoring(hdev)) {
2727 /* Duplicate filter should be disabled when some advertisement
2728 * monitor is activated, otherwise AdvMon can only receive one
2729 * advertisement for one peer(*) during active scanning, and
2730 * might report loss to these peers.
2732 * Note that different controllers have different meanings of
2733 * |duplicate|. Some of them consider packets with the same
2734 * address as duplicate, and others consider packets with the
2735 * same address and the same RSSI as duplicate. Although in the
2736 * latter case we don't need to disable duplicate filter, but
2737 * it is common to have active scanning for a short period of
2738 * time, the power impact should be neglectable.
2740 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2743 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2744 hdev->le_scan_window_discovery, own_addr_type,
2745 filter_policy, filter_dup, addr_resolv);
/* hci_req_sync() callback for simultaneous discovery: start an active LE
 * scan (interval in @opt) and, if that succeeds, queue a BR/EDR inquiry
 * of DISCOV_BREDR_INQUIRY_LEN in the same request so the controller runs
 * both concurrently.
 * NOTE(review): the error check between the two calls is elided from this
 * excerpt.
 */
2749 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2753 bt_dev_dbg(req->hdev, "");
2755 err = active_scan(req, opt);
2759 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
/* Kick off discovery according to hdev->discovery.type, running the
 * matching request synchronously and reporting the result through
 * *@status. For LE-involved discovery types, also schedules the
 * le_scan_disable delayed work to bound the scan duration, and records
 * scan_start/scan_duration when the controller has a strict duplicate
 * filter (needed by le_scan_restart_work()).
 */
2762 static void start_discovery(struct hci_dev *hdev, u8 *status)
2764 unsigned long timeout;
2766 bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2768 switch (hdev->discovery.type) {
2769 case DISCOV_TYPE_BREDR:
/* BR/EDR-only: just an inquiry; no LE scan timeout needed. */
2770 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2771 hci_req_sync(hdev, bredr_inquiry,
2772 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2775 case DISCOV_TYPE_INTERLEAVED:
2776 /* When running simultaneous discovery, the LE scanning time
2777 * should occupy the whole discovery time sine BR/EDR inquiry
2778 * and LE scanning are scheduled by the controller.
2780 * For interleaving discovery in comparison, BR/EDR inquiry
2781 * and LE scanning are done sequentially with separate
2784 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2786 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2787 /* During simultaneous discovery, we double LE scan
2788 * interval. We must leave some time for the controller
2789 * to do BR/EDR inquiry.
2791 hci_req_sync(hdev, interleaved_discov,
2792 hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
/* Sequential interleaving: LE phase first, inquiry follows from
 * le_scan_disable_work() when the LE timeout fires. */
2797 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2798 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2799 HCI_CMD_TIMEOUT, status);
2801 case DISCOV_TYPE_LE:
2802 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2803 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2804 HCI_CMD_TIMEOUT, status);
/* Unknown discovery type (default case label elided from excerpt). */
2807 *status = HCI_ERROR_UNSPECIFIED;
2814 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2816 /* When service discovery is used and the controller has a
2817 * strict duplicate filter, it is important to remember the
2818 * start and duration of the scan. This is required for
2819 * restarting scanning during the discovery phase.
2821 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2822 hdev->discovery.result_filtering) {
2823 hdev->discovery.scan_start = jiffies;
2824 hdev->discovery.scan_duration = timeout;
2827 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
/* Queue the commands needed to stop the current discovery procedure:
 * cancel a running inquiry, disable an active LE scan (cancelling the
 * pending scan-timeout work first), and, when name resolution is in
 * progress, cancel the outstanding remote-name request. Also handles the
 * plain passive-scanning case when no discovery is active.
 * NOTE(review): the returned bool presumably reflects whether any command
 * was queued; the `ret` bookkeeping lines are elided from this excerpt.
 */
2831 bool hci_req_stop_discovery(struct hci_request *req)
2833 struct hci_dev *hdev = req->hdev;
2834 struct discovery_state *d = &hdev->discovery;
2835 struct hci_cp_remote_name_req_cancel cp;
2836 struct inquiry_entry *e;
2839 bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2841 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2842 if (test_bit(HCI_INQUIRY, &hdev->flags))
2843 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2845 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
/* The timeout/restart workers must not fire for a scan we are
 * explicitly stopping. */
2846 cancel_delayed_work(&hdev->le_scan_disable);
2847 cancel_delayed_work(&hdev->le_scan_restart);
2848 hci_req_add_le_scan_disable(req, false);
2853 /* Passive scanning */
2854 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2855 hci_req_add_le_scan_disable(req, false);
2860 /* No further actions needed for LE-only discovery */
2861 if (d->type == DISCOV_TYPE_LE)
2864 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
/* Find the entry whose name is currently being resolved and cancel
 * that remote-name request. */
2865 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2870 bacpy(&cp.bdaddr, &e->data.bdaddr);
2871 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
/* Completion callback for hci_req_configure_datapath(): debug-log the
 * command status only; no further action is taken.
 */
2879 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2882 bt_dev_dbg(hdev, "status %u", status);
/* Configure the controller's offload data path for the given (e)SCO
 * @codec. Fetches driver-provided vendor configuration data and the data
 * path id via hdev callbacks, then issues HCI_CONFIGURE_DATA_PATH twice —
 * once for each direction (0x00 and 0x01) — with the same vendor payload.
 *
 * The command buffer is sized sizeof(*cmd) + vnd_len for the trailing
 * flexible vnd_data array.
 * NOTE(review): the error checks after each callback and the kfree() of
 * cmd/vnd_data on exit are elided from this excerpt — verify all paths
 * free both allocations.
 */
2885 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2887 struct hci_request req;
2889 __u8 vnd_len, *vnd_data = NULL;
2890 struct hci_op_configure_data_path *cmd = NULL;
2892 hci_req_init(&req, hdev);
/* Driver supplies codec-specific vendor data (allocated by callback). */
2894 err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2899 cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2905 err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2909 cmd->vnd_len = vnd_len;
2910 memcpy(cmd->vnd_data, vnd_data, vnd_len);
/* Same configuration is applied to both directions of the path. */
2912 cmd->direction = 0x00;
2913 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2915 cmd->direction = 0x01;
2916 hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2918 err = hci_req_run(&req, config_data_path_complete);
/* hci_req_sync() callback: stop discovery with hdev locked. @opt is
 * unused; the hci_req_stop_discovery() bool result is ignored here.
 */
2926 static int stop_discovery(struct hci_request *req, unsigned long opt)
2928 hci_dev_lock(req->hdev);
2929 hci_req_stop_discovery(req);
2930 hci_dev_unlock(req->hdev);
/* Deferred work driving the discovery state machine: start discovery when
 * STARTING (reporting the result to mgmt and moving to FINDING on success
 * or back to STOPPED on failure), stop it when STOPPING (reporting to
 * mgmt and moving to STOPPED), and do nothing when already STOPPED.
 */
2935 static void discov_update(struct work_struct *work)
2937 struct hci_dev *hdev = container_of(work, struct hci_dev,
2941 switch (hdev->discovery.state) {
2942 case DISCOVERY_STARTING:
2943 start_discovery(hdev, &status);
2944 mgmt_start_discovery_complete(hdev, status);
/* Non-zero status: discovery failed to start. */
2946 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2948 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2950 case DISCOVERY_STOPPING:
2951 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2952 mgmt_stop_discovery_complete(hdev, status);
2954 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2956 case DISCOVERY_STOPPED:
/* Delayed work fired when the discoverable timeout expires: clear both
 * discoverable flags and the timeout, then synchronously push the updated
 * discoverable setting to the controller and notify mgmt listeners of the
 * new settings.
 * NOTE(review): the hci_dev_lock() matching the unlock below is elided
 * from this excerpt.
 */
2962 static void discov_off(struct work_struct *work)
2964 struct hci_dev *hdev = container_of(work, struct hci_dev,
2967 bt_dev_dbg(hdev, "");
2971 /* When discoverable timeout triggers, then just make sure
2972 * the limited discoverable flag is cleared. Even in the case
2973 * of a timeout triggered from general discoverable, it is
2974 * safe to unconditionally clear the flag.
2976 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2977 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2978 hdev->discov_timeout = 0;
2980 hci_dev_unlock(hdev);
/* Sync the flag change to the controller outside the lock. */
2982 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2983 mgmt_new_settings(hdev);
/* hci_req_sync() callback run at power-on: bring the controller's host
 * configuration in line with the stack's flags. Covers SSP mode, Secure
 * Connections support, LE host support, default advertising data and
 * (re)enabling advertising, authentication (link security), fast
 * connectable mode, scan mode, device class, local name and EIR data.
 * Commands are only queued when the controller state differs from the
 * desired state, to avoid redundant HCI traffic.
 */
2986 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2988 struct hci_dev *hdev = req->hdev;
/* Enable SSP on the controller if the stack wants it but the controller
 * host feature bit is not yet set. */
2993 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2994 !lmp_host_ssp_capable(hdev)) {
2997 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2999 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
3002 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
3003 sizeof(support), &support);
/* LE host support only needs writing on dual-mode controllers. */
3007 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
3008 lmp_bredr_capable(hdev)) {
3009 struct hci_cp_write_le_host_supported cp;
3014 /* Check first if we already have the right
3015 * host state (host features set)
3017 if (cp.le != lmp_host_le_capable(hdev) ||
3018 cp.simul != lmp_host_le_br_capable(hdev))
3019 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3023 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
3024 /* Make sure the controller has a good default for
3025 * advertising data. This also applies to the case
3026 * where BR/EDR was toggled during the AUTO_OFF phase.
3028 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3029 list_empty(&hdev->adv_instances)) {
/* Extended advertising uses its own setup path; legacy path just
 * refreshes adv + scan-response data for instance 0. */
3032 if (ext_adv_capable(hdev)) {
3033 err = __hci_req_setup_ext_adv_instance(req,
3036 __hci_req_update_scan_rsp_data(req,
3040 __hci_req_update_adv_data(req, 0x00);
3041 __hci_req_update_scan_rsp_data(req, 0x00);
3044 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
3045 if (!ext_adv_capable(hdev))
3046 __hci_req_enable_advertising(req);
3048 __hci_req_enable_ext_advertising(req,
/* No global advertising flag but instances exist: schedule the
 * first configured instance. */
3051 } else if (!list_empty(&hdev->adv_instances)) {
3052 struct adv_info *adv_instance;
3054 adv_instance = list_first_entry(&hdev->adv_instances,
3055 struct adv_info, list);
3056 __hci_req_schedule_adv_instance(req,
3057 adv_instance->instance,
/* Sync authentication-enable with the link security flag. */
3062 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3063 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3064 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
3065 sizeof(link_sec), &link_sec);
3067 if (lmp_bredr_capable(hdev)) {
3068 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3069 __hci_req_write_fast_connectable(req, true);
3071 __hci_req_write_fast_connectable(req, false);
3072 __hci_req_update_scan(req);
3073 __hci_req_update_class(req);
3074 __hci_req_update_name(req);
3075 __hci_req_update_eir(req);
3078 hci_dev_unlock(hdev);
/* Finish powering on the controller: synchronously apply the host
 * configuration via powered_update_hci(). The SMP channel registration
 * mentioned below happens here (call elided from this excerpt) because it
 * must wait until the address configuration is known.
 */
3082 int __hci_req_hci_power_on(struct hci_dev *hdev)
3084 /* Register the available SMP channels (BR/EDR and LE) only when
3085 * successfully powering on the controller. This late
3086 * registration is required so that LE SMP can clearly decide if
3087 * the public address or static address is used.
3091 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
/* Initialize all work items and delayed work items used by the request
 * machinery for @hdev. Called once during device setup; each entry pairs
 * a work struct with its handler defined in this file (or elsewhere for
 * scan_update_work, connectable_update_work, discoverable_update_work,
 * adv_timeout_expire and interleave_scan_work).
 */
3095 void hci_request_setup(struct hci_dev *hdev)
3097 INIT_WORK(&hdev->discov_update, discov_update);
3098 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
3099 INIT_WORK(&hdev->scan_update, scan_update_work);
3100 INIT_WORK(&hdev->connectable_update, connectable_update_work);
3101 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
3102 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
3103 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3104 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3105 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
3106 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
/* Cancel every pending request and work item for @hdev, waiting for any
 * currently-running handlers to finish (the _sync variants). The pending
 * synchronous request, if any, is aborted with ENODEV. The adv instance
 * expiry work is only cancelled when a timeout is actually armed, and its
 * bookkeeping is reset.
 */
3109 void hci_request_cancel_all(struct hci_dev *hdev)
3111 hci_req_sync_cancel(hdev, ENODEV);
3113 cancel_work_sync(&hdev->discov_update);
3114 cancel_work_sync(&hdev->bg_scan_update);
3115 cancel_work_sync(&hdev->scan_update);
3116 cancel_work_sync(&hdev->connectable_update);
3117 cancel_work_sync(&hdev->discoverable_update);
3118 cancel_delayed_work_sync(&hdev->discov_off);
3119 cancel_delayed_work_sync(&hdev->le_scan_disable);
3120 cancel_delayed_work_sync(&hdev->le_scan_restart);
3122 if (hdev->adv_instance_timeout) {
3123 cancel_delayed_work_sync(&hdev->adv_instance_expire);
3124 hdev->adv_instance_timeout = 0;
3127 cancel_interleave_scan(hdev);