/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "eir.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
	return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

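/* A minimal usage sketch of the request machinery above (illustrative
 * only, not a function of this file): commands are queued on a request
 * and then handed to the command work queue in one batch.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, read_version_complete);
 *
 * read_version_complete() here stands in for any hci_req_complete_t
 * callback; it runs once the last queued command completes.
 */
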
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	bt_dev_dbg(hdev, "start");

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	bt_dev_dbg(hdev, "end: err %d", err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	/* Check the state after obtaining the lock to protect the HCI_UP
	 * flag against any races from hci_dev_do_close when the controller
	 * gets removed.
	 */
	if (test_bit(HCI_UP, &hdev->flags))
		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	else
		ret = -ENETDOWN;
	hci_req_sync_unlock(hdev);

	return ret;
}

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

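/* For reference, the on-air layout built above (a summary of the Core
 * Specification command packet format, not code from this file):
 *
 *	opcode (2 octets, little endian): OGF (6 bits) << 10 | OCF (10 bits)
 *	plen   (1 octet):                 length of the parameter block
 *	params (plen octets)
 *
 * hci_skb_pkt_type() additionally tags the skb with HCI_COMMAND_PKT so
 * the transport driver can prepend the right packet indicator.
 */
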
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval (0x0100 * 0.625 msec) */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = hdev->def_page_scan_type;
		acp.interval = cpu_to_le16(hdev->def_page_scan_int);
	}

	acp.window = cpu_to_le16(hdev->def_page_scan_window);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Returns true if an interleaved scan was started, in which case the
 * caller should skip configuring a regular scan; returns false otherwise.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}

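/* Resulting rotation (a sketch for orientation, not additional logic):
 * starting puts the state machine in INTERLEAVE_SCAN_NO_FILTER;
 * interleave_scan_work() further below then toggles between
 * INTERLEAVE_SCAN_NO_FILTER and INTERLEAVE_SCAN_ALLOWLIST, re-running
 * the passive scan setup each time so that update_accept_list() picks
 * the filter policy matching the current state, until
 * cancel_interleave_scan() resets the state to INTERLEAVE_SCAN_NONE.
 */
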
void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
				 u8 bdaddr_type)
{
	struct hci_cp_le_del_from_accept_list cp;

	cp.bdaddr_type = bdaddr_type;
	bacpy(&cp.bdaddr, bdaddr);

	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(req->hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
		if (irk) {
			struct hci_cp_le_del_from_resolv_list cp;

			cp.bdaddr_type = bdaddr_type;
			bacpy(&cp.bdaddr, bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
			      struct hci_conn_params *params, u8 *num_entries,
			      bool allow_rpa)
{
	struct hci_cp_le_add_to_accept_list cp;
	struct hci_dev *hdev = req->hdev;

	/* Already in accept list */
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
				   params->addr_type))
		return 0;

	/* Select filter policy to accept all advertising */
	if (*num_entries >= hdev->le_accept_list_size)
		return -1;

	/* Accept list can not be used with RPAs */
	if (!allow_rpa &&
	    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
		return -1;
	}

	/* During suspend, only wakeable devices can be in accept list */
	if (hdev->suspended &&
	    !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
		return 0;

	*num_entries += 1;
	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
		   cp.bdaddr_type);
	hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

	if (use_ll_privacy(hdev)) {
		struct smp_irk *irk;

		irk = hci_find_irk_by_addr(hdev, &params->addr,
					   params->addr_type);
		if (irk) {
			struct hci_cp_le_add_to_resolv_list cp;

			cp.bdaddr_type = params->addr_type;
			bacpy(&cp.bdaddr, &params->addr);
			memcpy(cp.peer_irk, irk->val, 16);

			if (hci_dev_test_flag(hdev, HCI_PRIVACY))
				memcpy(cp.local_irk, hdev->irk, 16);
			else
				memset(cp.local_irk, 0, 16);

			hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
				    sizeof(cp), &cp);
		}
	}

	return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the Privacy 1.2
	 * features. Additionally, once we support Privacy 1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}

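/* The return values above map directly onto the LE scan filter policy
 * field (a summary of the values used in this file):
 *
 *	0x00 - accept all advertisements, accept list unused
 *	0x01 - accept only advertisers on the accept list
 *
 * hci_req_add_le_passive_scan() may additionally OR in 0x02, switching
 * to the extended policies (0x02/0x03) that also pass directed
 * advertising with an RPA destination to the host.
 */
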
static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

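/* Units for the interval/window pairs passed above (Core Specification
 * values, noted here for convenience): both are expressed in 0.625 ms
 * steps and the window must never exceed the interval. For example, an
 * interval of 0x0060 (96) with a window of 0x0030 (48) means scanning
 * for 30 ms out of every 60 ms.
 */
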
/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == LE_LINK && c->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
			rcu_read_unlock();
			return true;
		}
	}

	rcu_read_unlock();

	return false;
}

/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
	bt_dev_dbg(req->hdev, "Pausing advertising instances");

	/* Call to disable any advertisements active on the controller.
	 * This will succeed even if no advertisements are configured.
	 */
	__hci_req_disable_advertising(req);

	/* If we are using software rotation, pause the loop */
	if (!ext_adv_capable(req->hdev))
		cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
	struct adv_info *adv;

	bt_dev_dbg(req->hdev, "Resuming advertising instances");

	if (ext_adv_capable(req->hdev)) {
		/* Call for each tracked instance to be re-enabled */
		list_for_each_entry(adv, &req->hdev->adv_instances, list) {
			__hci_req_enable_ext_advertising(req,
							 adv->instance);
		}
	} else {
		/* Schedule for most recent instance to be restarted and begin
		 * the software rotation loop
		 */
		__hci_req_schedule_adv_instance(req,
						req->hdev->cur_adv_instance,
						true);
	}
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_resume_adv_instances(&req);

	return hci_req_run(&req, NULL);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	if (ext_adv_capable(req->hdev)) {
		__hci_req_disable_ext_adv_instance(req, 0x00);
	} else {
		u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
	}
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u16 adv_min_interval, adv_max_interval;
	u32 flags;

	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		adv_min_interval = adv->min_interval;
		adv_max_interval = adv->max_interval;
	} else {
		adv_min_interval = hdev->le_adv_min_interval;
		adv_max_interval = hdev->le_adv_max_interval;
	}

	if (connectable) {
		cp.type = LE_ADV_IND;
	} else {
		if (adv_cur_instance_is_scannable(hdev))
			cp.type = LE_ADV_SCAN_IND;
		else
			cp.type = LE_ADV_NONCONN_IND;

		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
		}
	}

	cp.min_interval = cpu_to_le16(adv_min_interval);
	cp.max_interval = cpu_to_le16(adv_max_interval);
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_scan_rsp_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_scan_rsp(hdev, instance, pdu.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(pdu.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, pdu.data, len);
		hdev->scan_rsp_data_len = len;

		pdu.cp.handle = instance;
		pdu.cp.length = len;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_scan_rsp(hdev, instance, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct {
			struct hci_cp_le_set_ext_adv_data cp;
			u8 data[HCI_MAX_EXT_AD_LENGTH];
		} pdu;

		memset(&pdu, 0, sizeof(pdu));

		len = eir_create_adv_data(hdev, instance, pdu.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(pdu.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, pdu.data, len);
		hdev->adv_data_len = len;

		pdu.cp.length = len;
		pdu.cp.handle = instance;
		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
			    sizeof(pdu.cp) + len, &pdu.cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = eir_create_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
	struct hci_request req;
	__u8 enable = 0x00;

	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return;

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

	hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	u8 status;
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
		     HCI_CMD_TIMEOUT, &status);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the Controller supports LL Privacy, use own address
		 * type 0x03 (random resolved).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

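/* Random address sub-types referenced above, distinguished by the two
 * most significant bits of the most significant octet (a Core
 * Specification summary, included here for orientation):
 *
 *	0b00 - non-resolvable private address (hence nrpa.b[5] &= 0x3f)
 *	0b01 - resolvable private address (prand + hash, see smp_generate_rpa)
 *	0b11 - static random address
 */
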
void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv_instance;
	bool secondary_adv;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv_instance,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv_instance) {
		hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
		cp.tx_power = adv_instance->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		struct hci_cp_le_set_adv_set_rand_addr cp;

		/* Check if the random address needs to be updated */
		if (adv_instance) {
			if (!bacmp(&random_addr, &adv_instance->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
			/* Instance 0x00 doesn't have an adv_info, instead it
			 * uses hdev->random_addr to track its address so
			 * whenever it needs to be updated this also sets the
			 * random address since hdev->random_addr is shared with
			 * scan state machine.
			 */
			set_random_addr(req, &random_addr);
		}

		memset(&cp, 0, sizeof(cp));

		cp.handle = instance;
		bacpy(&cp.bdaddr, &random_addr);

		hci_req_add(req,
			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
			    sizeof(cp), &cp);
	}

	return 0;
}

int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	struct adv_info *adv_instance;

	if (instance > 0) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return -EINVAL;
	} else {
		adv_instance = NULL;
	}

	cp = (void *)data;
	adv_set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv_instance && adv_instance->duration) {
		u16 duration = adv_instance->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		adv_set->duration = cpu_to_le16(duration / 10);
	}

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) +
		    sizeof(*adv_set) * cp->num_of_sets,
		    data);

	return 0;
}

int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
	u8 req_size;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	adv_set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	adv_set->handle = instance;

	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);

	return 0;
}

int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
		return -EINVAL;

	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);

	return 0;
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
	int err;

	/* If instance isn't pending, the chip knows about it, and it's safe to
	 * disable
	 */
	if (adv_instance && !adv_instance->pending)
		__hci_req_disable_ext_adv_instance(req, instance);

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req, instance);

	return 0;
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	/* Only use work for scheduling instances with legacy advertising */
	if (!ext_adv_capable(hdev)) {
		hdev->adv_instance_timeout = timeout;
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->adv_instance_expire,
				   msecs_to_jiffies(timeout * 1000));
	}

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

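/* Worked example for the timeout/duration interplay above (illustrative
 * numbers only): an instance with timeout = 60 s and duration = 2 s is
 * advertised in 2 s slices; each pass reduces remaining_time by the
 * slice, so after 30 passes remaining_time reaches zero and
 * hci_req_clear_adv_instance() below removes the instance. With
 * timeout = 0 the slice length is still duration, but remaining_time is
 * never decremented and the instance rotates indefinitely.
 */
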
/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance && !ext_adv_capable(hdev))
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		/* If the Controller supports LL Privacy, use own address
		 * type 0x03 (random resolved).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

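/* Selection order implemented above, from most to least private (a
 * summary, not additional logic): a resolvable private address when
 * use_rpa is set, then a fresh non-resolvable private address when
 * require_privacy is set, then the configured static random address,
 * and finally the public address as the fallback.
 */
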
static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->accept_list, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hdev->scanning_paused)
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_accept_list_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

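/* The scan value written above is the Write Scan Enable bitmask:
 * bit 0 (SCAN_INQUIRY) enables inquiry scan, bit 1 (SCAN_PAGE) enables
 * page scan, and SCAN_DISABLED (0x00) turns both off. The early return
 * avoids re-sending the command when HCI_PSCAN/HCI_ISCAN already match.
 */
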
static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

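/* Class of Device layout used above (24 bits, sent LSB first): cod[0]
 * carries the minor device class, cod[1] the major device class and
 * cod[2] the service class bits from the registered UUID hints. The
 * 0x20 OR into cod[1] sets CoD bit 13, the "Limited Discoverable Mode"
 * service class flag.
 */
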
static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

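/* The byte triplets above are 24-bit inquiry access code LAPs sent LSB
 * first: 0x9e8b33 is the General Inquiry Access Code (GIAC) and
 * 0x9e8b00 the Limited Inquiry Access Code (LIAC), which is why a
 * limited-discoverable device answers both while a general-discoverable
 * one lists only the GIAC.
 */
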
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
			if (ext_adv_capable(hdev))
				__hci_req_start_ext_adv(req, 0x00);
			else
				__hci_req_enable_advertising(req);
		}
	}

	hci_dev_unlock(hdev);

	return 0;
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x",
			   status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req, false);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	if (test_bit(HCI_INQUIRY, &req->hdev->flags))
		return 0;

	bt_dev_dbg(req->hdev, "");

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

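/* cp.length above is the Inquiry_Length parameter, expressed in units
 * of 1.28 s: for example, DISCOV_BREDR_INQUIRY_LEN (0x08) passed in via
 * opt gives 8 * 1.28 s = 10.24 s of inquiry.
 */
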
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	hci_req_add_le_scan_disable(req, false);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	bt_dev_dbg(hdev, "");

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	/* Accept list is not used for discovery */
	u8 filter_policy = 0x00;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Discovery doesn't require controller address resolution */
	bool addr_resolv = false;
	int err;

	bt_dev_dbg(hdev, "");

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(req, false);
		cancel_interleave_scan(hdev);
	}

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	if (hci_is_adv_monitoring(hdev)) {
		/* Duplicate filter should be disabled when some advertisement
		 * monitor is activated, otherwise AdvMon can only receive one
		 * advertisement per peer during active scanning, and might
		 * report loss to these peers.
		 *
		 * Note that different controllers have different meanings of
		 * "duplicate". Some consider packets with the same address as
		 * duplicates, others consider packets with the same address
		 * and the same RSSI as duplicates. Although the filter would
		 * not need to be disabled in the latter case, active scanning
		 * usually runs only for a short period, so the power impact
		 * should be negligible.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	}

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
			   hdev->le_scan_window_discovery, own_addr_type,
			   filter_policy, filter_dup, addr_resolv);
	return 0;
}
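
/* On controllers flagged with HCI_QUIRK_SIMULTANEOUS_DISCOVERY both
 * transports can discover at the same time, so a single request queues
 * the LE scan commands followed by the BR/EDR inquiry command.
 */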
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	bt_dev_dbg(req->hdev, "");

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}
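
/* Dispatch discovery according to hdev->discovery.type:
 *
 *	DISCOV_TYPE_BREDR       - BR/EDR inquiry only
 *	DISCOV_TYPE_INTERLEAVED - inquiry plus LE scan, either truly
 *	                          simultaneous (quirk set) or sequential
 *	DISCOV_TYPE_LE          - LE scan only
 *
 * On success the le_scan_disable delayed work is queued so the LE scan
 * is stopped when the discovery timeout expires.
 */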
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     hdev->le_scan_int_discovery * 2,
				     HCI_CMD_TIMEOUT, status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}
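
/* Queue whatever commands are needed to stop the current discovery
 * activity: inquiry cancel, LE scan disable and, while a BR/EDR remote
 * name is being resolved, a remote name request cancel. Returns true
 * if at least one command was added to the request.
 */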
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			cancel_delayed_work(&hdev->le_scan_restart);
			hci_req_add_le_scan_disable(req, false);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req, false);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}
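
/* Offloaded codec setup: the HCI Configure Data Path command is sent
 * once per direction using driver-provided, vendor-specific codec
 * configuration. A rough caller sketch (the codec id is illustrative
 * and not taken from this file):
 *
 *	struct bt_codec codec = { .id = 0x05 };	// e.g. mSBC
 *	int err = -EOPNOTSUPP;
 *
 *	if (hdev->get_codec_config_data && hdev->get_data_path_id)
 *		err = hci_req_configure_datapath(hdev, &codec);
 */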
static void config_data_path_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}

int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
{
	struct hci_request req;
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	hci_req_init(&req, hdev);

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	cmd->direction = 0x01;
	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

	err = hci_req_run(&req, config_data_path_complete);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}
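
/* hci_req_sync() callback that wraps hci_req_stop_discovery() with the
 * hdev lock held, so discovery can be stopped through the synchronous
 * request machinery.
 */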
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}
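
/* Work item driving the mgmt discovery state machine: STARTING moves to
 * FINDING (or back to STOPPED on failure), and STOPPING moves to
 * STOPPED once the stop request succeeds.
 */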
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}
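
/* Delayed work that fires when the discoverable timeout expires: the
 * discoverable flags are cleared first, so the subsequent
 * discoverable_update request reprograms the controller's scan mode to
 * match, and the new settings are reported to mgmt.
 */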
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}
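
/* Request builder run right after the controller powers on. It brings
 * the controller in line with the host configuration: SSP and Secure
 * Connections support, the LE host feature bits, default advertising
 * data or the first pending advertising instance, authentication
 * (link security) and, for BR/EDR, scan mode, class, name and EIR.
 */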
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}
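
/* Called from the power-on path; returns the result of running
 * powered_update_hci() synchronously with HCI_CMD_TIMEOUT.
 */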
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}
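
/* Wire up the request-related work items at device setup time. Each
 * worker initialized here has a matching cancel in
 * hci_request_cancel_all() below.
 */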
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}
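
/* Tear-down counterpart of hci_request_setup(): abort any pending sync
 * command and cancel every work item, synchronously where the worker
 * could otherwise still be running.
 */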
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_cmd_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	cancel_interleave_scan(hdev);
}