Bluetooth: Add helper for serialized HCI command execution
[linux-2.6-block.git] net/bluetooth/hci_request.c
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

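/* Illustrative usage (a sketch, not code from this file): callers build a
 * request on the stack, queue one or more commands and then run it. The
 * function and callback names below are hypothetical.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "opcode 0x%4.4x status 0x%2.2x",
 *			   opcode, status);
 *	}
 *
 *	static int example_enable_page_scan(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *		u8 scan = SCAN_PAGE;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return hci_req_run(&req, example_complete);
 *	}
 */
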
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        bt_dev_dbg(hdev, "err 0x%2.2x", err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect the HCI_UP
         * flag against any races with hci_dev_do_close() when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}

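/* Illustrative usage (a sketch): the builder callback queues commands and
 * hci_req_sync() blocks until the request completes, is cancelled or times
 * out. The builder and wrapper names below are hypothetical.
 *
 *	static int read_local_name_req(struct hci_request *req,
 *				       unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *		return 0;
 *	}
 *
 *	static int example_read_local_name(struct hci_dev *hdev)
 *	{
 *		u8 hci_status;
 *
 *		return hci_req_sync(hdev, read_local_name_req, 0,
 *				    HCI_CMD_TIMEOUT, &hci_status);
 *	}
 */
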
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

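/* For reference, the skb built by hci_prepare_cmd() carries the standard
 * HCI command wire format: a little-endian opcode followed by a one byte
 * parameter length and plen parameter bytes. For example,
 * hci_prepare_cmd(hdev, HCI_OP_RESET, 0, NULL) with opcode 0x0c03
 * produces the three bytes 0x03 0x0c 0x00.
 */
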
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

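/* Illustrative usage (a sketch): most commands complete on Command
 * Complete/Command Status, so hci_req_add() with event 0 is sufficient.
 * When the result is signalled by a different event, pass that event so
 * the request only completes once it arrives; e.g. for an inquiry (lap
 * and length as supplied by the caller):
 *
 *	struct hci_cp_inquiry cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(&cp.lap, lap, sizeof(cp.lap));
 *	cp.length = length;
 *	hci_req_add_ev(req, HCI_OP_INQUIRY, sizeof(cp), &cp,
 *		       HCI_EV_INQUIRY_COMPLETE);
 */
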
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval (0x0100 * 0.625 msec) */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this call started an interleaved scan, in which case the
 * caller must not go on to set up a regular scan; otherwise return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure that proper values are set for the RSSI threshold and
         * UUID filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        bt_dev_dbg(hdev, "ADV monitoring is %s",
                   hci_is_adv_monitoring(hdev) ? "on" : "off");

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports) &&
            !hci_is_adv_monitoring(hdev)) {
                /* If there are no pending LE connections, no devices
                 * to be scanned for and no ADV monitors, we should stop
                 * the background scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req, false);

                bt_dev_dbg(hdev, "stopping background scanning");
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req, false);

                hci_req_add_le_passive_scan(req);
                bt_dev_dbg(hdev, "starting background scanning");
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        eir_create(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (hdev->suspended)
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev) &&
            hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                                   params->current_flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of accept list even with RPAs in suspend. In the
         * worst case, we won't be able to wake from devices that use the
         * privacy 1.2 features. Additionally, once we support privacy 1.2
         * and IRK offloading, we can update this to also check for those
         * conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available accept list entries in the controller, then
         * just abort and return filter policy value to not use the
         * accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and offloading is
         *   not supported
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}

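/* For reference, the value returned by update_accept_list() is the LE
 * scan filter policy from the Core Specification:
 *	0x00 - accept all advertising packets (accept list not used)
 *	0x01 - accept only packets from devices on the accept list
 * hci_req_add_le_passive_scan() may additionally OR in 0x02 to select
 * the extended policies, which also deliver directed advertising whose
 * target address is a resolvable private address.
 */
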
static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) &&
            hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if the controller supports the LE Set
         * Extended Scan Parameters and LE Set Extended Scan Enable commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

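/* Scan timing parameters are expressed in 0.625 ms units. For example,
 * assuming the common defaults of interval 0x0060 and window 0x0030, the
 * controller listens for 48 * 0.625 = 30 ms out of every
 * 96 * 0.625 = 60 ms, i.e. a 50% duty cycle.
 */
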
/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Make sure hci_req_add_le_scan_disable() is called first to disable
 * controller-based address resolution, so that the resolving list can
 * be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller uses resolvable random addresses and LE
         * privacy is therefore enabled, controllers that support the
         * Extended Scanner Filter Policies can also handle directed
         * advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled), use the extended filter
         * policies 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;

                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable the duplicates filter when scanning for
                 * advertisement monitors for the following reasons.
                 *
                 * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable the duplicates filter;
                 * otherwise the host only receives one advertisement per peer
                 * and cannot tell whether that peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
                return;

        if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
                memset(&f, 0, sizeof(f));
                f.flt_type = HCI_FLT_CLEAR_ALL;
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
        }
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list_with_flags *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan = SCAN_DISABLED;
        bool scanning = test_bit(HCI_PSCAN, &hdev->flags);

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->accept_list, list) {
                if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                        b->current_flags))
                        continue;

                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
                scan = SCAN_PAGE;
        }

        if (scan && !scanning) {
                set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        } else if (!scan && scanning) {
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        }
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
                clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }

        if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
                clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
}

static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
                                                bool suspending)
{
        struct hci_dev *hdev = req->hdev;

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_MSFT:
                if (suspending)
                        msft_suspend(hdev);
                else
                        msft_resume(hdev);
                break;
        default:
                return;
        }

        /* No need to block when enabling since it's on resume path */
        if (hdev->suspended && suspending)
                set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        int old_state;
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Pause discovery if not already stopped */
                old_state = hdev->discovery.state;
                if (old_state != DISCOVERY_STOPPED) {
                        set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hdev->discovery_paused = true;
                hdev->discovery_old_state = old_state;

                /* Stop directed advertising */
                old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
                if (old_state) {
                        set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
                        cancel_delayed_work(&hdev->discov_off);
                        queue_delayed_work(hdev->req_workqueue,
                                           &hdev->discov_off, 0);
                }

                /* Pause other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_pause_adv_instances(&req);

                hdev->advertising_paused = true;
                hdev->advertising_old_state = old_state;

                /* Disable page scan if enabled */
                if (test_bit(HCI_PSCAN, &hdev->flags)) {
                        page_scan = SCAN_DISABLED;
                        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
                                    &page_scan);
                        set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                }

                /* Disable LE passive scan if enabled */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_interleave_scan(hdev);
                        hci_req_add_le_scan_disable(&req, false);
                }

                /* Disable advertisement filters */
                hci_req_prepare_adv_monitor_suspend(&req, true);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                /* Clear any event filters and restore scan state */
                hci_req_clear_event_filter(&req);
                __hci_req_update_scan(&req);

                /* Reset passive/background scanning to normal */
                __hci_update_background_scan(&req);
                /* Enable all of the advertisement filters */
                hci_req_prepare_adv_monitor_suspend(&req, false);

                /* Unpause directed advertising */
                hdev->advertising_paused = false;
                if (hdev->advertising_old_state) {
                        set_bit(SUSPEND_UNPAUSE_ADVERTISING,
                                hdev->suspend_tasks);
                        hci_dev_set_flag(hdev, HCI_ADVERTISING);
                        queue_work(hdev->req_workqueue,
                                   &hdev->discoverable_update);
                        hdev->advertising_old_state = 0;
                }

                /* Resume other advertisements */
                if (hdev->adv_instance_cnt)
                        __hci_req_resume_adv_instances(&req);

                /* Unpause discovery */
                hdev->discovery_paused = false;
                if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
                    hdev->discovery_old_state != DISCOVERY_STOPPING) {
                        set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
                        hci_discovery_set_state(hdev, DISCOVERY_STARTING);
                        queue_work(hdev->req_workqueue, &hdev->discov_update);
                }

                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}

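/* The suspend handling above walks the suspended_state values in order:
 * from the running state to BT_SUSPEND_DISCONNECT (pause discovery and
 * advertising, disconnect everything, stop scanning), then to
 * BT_SUSPEND_CONFIGURE_WAKE (program event filters and a low duty cycle
 * passive scan for wakeable devices), and back to BT_RUNNING on resume,
 * which restores the previous scan, advertising and discovery state.
 */
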
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);

        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non-connectable mode
                 * bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non-connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable bit 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

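/* The le_states tests above index the 8 byte LE Supported States mask
 * reported by the controller: bit N lives in le_states[N / 8] at bit
 * position N % 8. For example, bit 20 is le_states[2] & (1 << 4), i.e.
 * le_states[2] & 0x10, and bit 38 is le_states[4] & 0x40, matching the
 * peripheral role checks above.
 */
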
void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
        adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                adv_min_interval = adv->min_interval;
                adv_max_interval = adv->max_interval;
        } else {
                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        }

        if (connectable) {
                cp.type = LE_ADV_IND;
        } else {
                if (adv_cur_instance_is_scannable(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

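/* Advertising intervals are likewise in 0.625 ms units; an interval of
 * 0x0800, for instance, corresponds to 2048 * 0.625 ms = 1.28 s between
 * advertising events.
 */
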
1356 void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
1357 {
1358         struct hci_dev *hdev = req->hdev;
1359         u8 len;
1360
1361         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1362                 return;
1363
1364         if (ext_adv_capable(hdev)) {
1365                 struct {
1366                         struct hci_cp_le_set_ext_scan_rsp_data cp;
1367                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1368                 } pdu;
1369
1370                 memset(&pdu, 0, sizeof(pdu));
1371
1372                 len = eir_create_scan_rsp(hdev, instance, pdu.data);
1373
1374                 if (hdev->scan_rsp_data_len == len &&
1375                     !memcmp(pdu.data, hdev->scan_rsp_data, len))
1376                         return;
1377
1378                 memcpy(hdev->scan_rsp_data, pdu.data, len);
1379                 hdev->scan_rsp_data_len = len;
1380
1381                 pdu.cp.handle = instance;
1382                 pdu.cp.length = len;
1383                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1384                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1385
1386                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1387                             sizeof(pdu.cp) + len, &pdu.cp);
1388         } else {
1389                 struct hci_cp_le_set_scan_rsp_data cp;
1390
1391                 memset(&cp, 0, sizeof(cp));
1392
1393                 len = eir_create_scan_rsp(hdev, instance, cp.data);
1394
1395                 if (hdev->scan_rsp_data_len == len &&
1396                     !memcmp(cp.data, hdev->scan_rsp_data, len))
1397                         return;
1398
1399                 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1400                 hdev->scan_rsp_data_len = len;
1401
1402                 cp.length = len;
1403
1404                 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
1405         }
1406 }
1407
1408 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1409 {
1410         struct hci_dev *hdev = req->hdev;
1411         u8 len;
1412
1413         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1414                 return;
1415
1416         if (ext_adv_capable(hdev)) {
1417                 struct {
1418                         struct hci_cp_le_set_ext_adv_data cp;
1419                         u8 data[HCI_MAX_EXT_AD_LENGTH];
1420                 } pdu;
1421
1422                 memset(&pdu, 0, sizeof(pdu));
1423
1424                 len = eir_create_adv_data(hdev, instance, pdu.data);
1425
1426                 /* There's nothing to do if the data hasn't changed */
1427                 if (hdev->adv_data_len == len &&
1428                     memcmp(pdu.data, hdev->adv_data, len) == 0)
1429                         return;
1430
1431                 memcpy(hdev->adv_data, pdu.data, len);
1432                 hdev->adv_data_len = len;
1433
1434                 pdu.cp.length = len;
1435                 pdu.cp.handle = instance;
1436                 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1437                 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1438
1439                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
1440                             sizeof(pdu.cp) + len, &pdu.cp);
1441         } else {
1442                 struct hci_cp_le_set_adv_data cp;
1443
1444                 memset(&cp, 0, sizeof(cp));
1445
1446                 len = eir_create_adv_data(hdev, instance, cp.data);
1447
1448                 /* There's nothing to do if the data hasn't changed */
1449                 if (hdev->adv_data_len == len &&
1450                     memcmp(cp.data, hdev->adv_data, len) == 0)
1451                         return;
1452
1453                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1454                 hdev->adv_data_len = len;
1455
1456                 cp.length = len;
1457
1458                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1459         }
1460 }
1461
1462 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1463 {
1464         struct hci_request req;
1465
1466         hci_req_init(&req, hdev);
1467         __hci_req_update_adv_data(&req, instance);
1468
1469         return hci_req_run(&req, NULL);
1470 }
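
/* Illustrative sketch, not part of the original file: the same
 * init -> build -> run pattern can queue several commands and attach a
 * completion callback. The callback name below is an assumption for
 * this example only.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "status %u", status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	__hci_req_update_adv_data(&req, instance);
 *	__hci_req_update_scan_rsp_data(&req, instance);
 *	hci_req_run(&req, example_complete);
 */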
1471
1472 static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
1473                                             u16 opcode)
1474 {
1475         bt_dev_dbg(hdev, "status %u", status);
1476 }
1477
1478 void hci_req_disable_address_resolution(struct hci_dev *hdev)
1479 {
1480         struct hci_request req;
1481         __u8 enable = 0x00;
1482
1483         if (!use_ll_privacy(hdev) &&
1484             !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
1485                 return;
1486
1487         hci_req_init(&req, hdev);
1488
1489         hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
1490
1491         hci_req_run(&req, enable_addr_resolution_complete);
1492 }
1493
1494 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1495 {
1496         bt_dev_dbg(hdev, "status %u", status);
1497 }
1498
1499 void hci_req_reenable_advertising(struct hci_dev *hdev)
1500 {
1501         struct hci_request req;
1502
1503         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1504             list_empty(&hdev->adv_instances))
1505                 return;
1506
1507         hci_req_init(&req, hdev);
1508
1509         if (hdev->cur_adv_instance) {
1510                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1511                                                 true);
1512         } else {
1513                 if (ext_adv_capable(hdev)) {
1514                         __hci_req_start_ext_adv(&req, 0x00);
1515                 } else {
1516                         __hci_req_update_adv_data(&req, 0x00);
1517                         __hci_req_update_scan_rsp_data(&req, 0x00);
1518                         __hci_req_enable_advertising(&req);
1519                 }
1520         }
1521
1522         hci_req_run(&req, adv_enable_complete);
1523 }
1524
1525 static void adv_timeout_expire(struct work_struct *work)
1526 {
1527         struct hci_dev *hdev = container_of(work, struct hci_dev,
1528                                             adv_instance_expire.work);
1529
1530         struct hci_request req;
1531         u8 instance;
1532
1533         bt_dev_dbg(hdev, "");
1534
1535         hci_dev_lock(hdev);
1536
1537         hdev->adv_instance_timeout = 0;
1538
1539         instance = hdev->cur_adv_instance;
1540         if (instance == 0x00)
1541                 goto unlock;
1542
1543         hci_req_init(&req, hdev);
1544
1545         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1546
1547         if (list_empty(&hdev->adv_instances))
1548                 __hci_req_disable_advertising(&req);
1549
1550         hci_req_run(&req, NULL);
1551
1552 unlock:
1553         hci_dev_unlock(hdev);
1554 }
1555
1556 static int hci_req_add_le_interleaved_scan(struct hci_request *req,
1557                                            unsigned long opt)
1558 {
1559         struct hci_dev *hdev = req->hdev;
1560         int ret = 0;
1561
1562         hci_dev_lock(hdev);
1563
1564         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1565                 hci_req_add_le_scan_disable(req, false);
1566         hci_req_add_le_passive_scan(req);
1567
1568         switch (hdev->interleave_scan_state) {
1569         case INTERLEAVE_SCAN_ALLOWLIST:
1570                 bt_dev_dbg(hdev, "next state: no filter");
1571                 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
1572                 break;
1573         case INTERLEAVE_SCAN_NO_FILTER:
1574                 bt_dev_dbg(hdev, "next state: allowlist");
1575                 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
1576                 break;
1577         case INTERLEAVE_SCAN_NONE:
1578                 bt_dev_err(hdev, "unexpected error");
1579                 ret = -1;
1580         }
1581
1582         hci_dev_unlock(hdev);
1583
1584         return ret;
1585 }
1586
1587 static void interleave_scan_work(struct work_struct *work)
1588 {
1589         struct hci_dev *hdev = container_of(work, struct hci_dev,
1590                                             interleave_scan.work);
1591         u8 status;
1592         unsigned long timeout;
1593
1594         if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
1595                 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
1596         } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
1597                 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
1598         } else {
1599                 bt_dev_err(hdev, "unexpected error");
1600                 return;
1601         }
1602
1603         hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
1604                      HCI_CMD_TIMEOUT, &status);
1605
1606         /* Don't continue interleaving if it was canceled */
1607         if (is_interleave_scanning(hdev))
1608                 queue_delayed_work(hdev->req_workqueue,
1609                                    &hdev->interleave_scan, timeout);
1610 }
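
/* Illustrative sketch, not part of the original file: with an assumed
 * allowlist duration of 300 ms and a no-filter duration of 500 ms, the
 * work item above keeps re-queuing itself so scanning alternates as
 *
 *	allowlist (300 ms) -> no filter (500 ms) -> allowlist (300 ms) -> ...
 *
 * until is_interleave_scanning() returns false.
 */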
1611
1612 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1613                            bool use_rpa, struct adv_info *adv_instance,
1614                            u8 *own_addr_type, bdaddr_t *rand_addr)
1615 {
1616         int err;
1617
1618         bacpy(rand_addr, BDADDR_ANY);
1619
1620         /* If privacy is enabled, use a resolvable private address. If the
1621          * current RPA has expired, generate a new one.
1622          */
1623         if (use_rpa) {
1624                 /* If the controller supports LL Privacy, use own address
1625                  * type 0x03 (resolvable private address).
1626                  */
1627                 if (use_ll_privacy(hdev) &&
1628                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
1629                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1630                 else
1631                         *own_addr_type = ADDR_LE_DEV_RANDOM;
1632
1633                 if (adv_instance) {
1634                         if (adv_rpa_valid(adv_instance))
1635                                 return 0;
1636                 } else {
1637                         if (rpa_valid(hdev))
1638                                 return 0;
1639                 }
1640
1641                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1642                 if (err < 0) {
1643                         bt_dev_err(hdev, "failed to generate new RPA");
1644                         return err;
1645                 }
1646
1647                 bacpy(rand_addr, &hdev->rpa);
1648
1649                 return 0;
1650         }
1651
1652         /* In case privacy is required but no resolvable private address
1653          * is available, use a non-resolvable private address. This is
1654          * useful for non-connectable advertising.
1655          */
1656         if (require_privacy) {
1657                 bdaddr_t nrpa;
1658
1659                 while (true) {
1660                         /* The non-resolvable private address is generated
1661                          * from six random bytes with the two most
1662                          * significant bits cleared.
1663                          */
1664                         get_random_bytes(&nrpa, 6);
1665                         nrpa.b[5] &= 0x3f;
1666
1667                         /* The non-resolvable private address shall not be
1668                          * equal to the public address.
1669                          */
1670                         if (bacmp(&hdev->bdaddr, &nrpa))
1671                                 break;
1672                 }
1673
1674                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1675                 bacpy(rand_addr, &nrpa);
1676
1677                 return 0;
1678         }
1679
1680         /* No privacy so use a public address. */
1681         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1682
1683         return 0;
1684 }
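
/* Illustrative sketch, not part of the original file: how a caller
 * might use hci_get_random_address() when preparing a non-connectable
 * advertising instance (require_privacy == true). The surrounding
 * variables are assumptions for this example only.
 *
 *	u8 own_addr_type;
 *	bdaddr_t rand_addr;
 *	int err;
 *
 *	err = hci_get_random_address(hdev, true, adv_use_rpa(hdev, flags),
 *				     adv_instance, &own_addr_type,
 *				     &rand_addr);
 *	if (err < 0)
 *		return err;
 */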
1685
1686 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1687 {
1688         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1689 }
1690
1691 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1692 {
1693         struct hci_dev *hdev = req->hdev;
1694
1695         /* If we're advertising or initiating an LE connection we can't
1696          * go ahead and change the random address at this time. This is
1697          * because the eventual initiator address used for the
1698          * subsequently created connection will be undefined (some
1699          * controllers use the new address and others the one we had
1700          * when the operation started).
1701          *
1702          * In this kind of scenario skip the update and let the random
1703          * address be updated at the next cycle.
1704          */
1705         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1706             hci_lookup_le_connect(hdev)) {
1707                 bt_dev_dbg(hdev, "Deferring random address update");
1708                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1709                 return;
1710         }
1711
1712         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1713 }
1714
1715 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1716 {
1717         struct hci_cp_le_set_ext_adv_params cp;
1718         struct hci_dev *hdev = req->hdev;
1719         bool connectable;
1720         u32 flags;
1721         bdaddr_t random_addr;
1722         u8 own_addr_type;
1723         int err;
1724         struct adv_info *adv_instance;
1725         bool secondary_adv;
1726
1727         if (instance > 0) {
1728                 adv_instance = hci_find_adv_instance(hdev, instance);
1729                 if (!adv_instance)
1730                         return -EINVAL;
1731         } else {
1732                 adv_instance = NULL;
1733         }
1734
1735         flags = hci_adv_instance_flags(hdev, instance);
1736
1737         /* If the "connectable" instance flag was not set, then choose between
1738          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1739          */
1740         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1741                       mgmt_get_connectable(hdev);
1742
1743         if (!is_advertising_allowed(hdev, connectable))
1744                 return -EPERM;
1745
1746         /* Set require_privacy to true only when non-connectable
1747          * advertising is used. In that case it is fine to use a
1748          * non-resolvable private address.
1749          */
1750         err = hci_get_random_address(hdev, !connectable,
1751                                      adv_use_rpa(hdev, flags), adv_instance,
1752                                      &own_addr_type, &random_addr);
1753         if (err < 0)
1754                 return err;
1755
1756         memset(&cp, 0, sizeof(cp));
1757
1758         if (adv_instance) {
1759                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1760                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1761                 cp.tx_power = adv_instance->tx_power;
1762         } else {
1763                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1764                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1765                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1766         }
1767
1768         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1769
1770         if (connectable) {
1771                 if (secondary_adv)
1772                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1773                 else
1774                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1775         } else if (hci_adv_instance_is_scannable(hdev, instance) ||
1776                    (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1777                 if (secondary_adv)
1778                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1779                 else
1780                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1781         } else {
1782                 if (secondary_adv)
1783                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1784                 else
1785                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1786         }
1787
1788         cp.own_addr_type = own_addr_type;
1789         cp.channel_map = hdev->le_adv_channel_map;
1790         cp.handle = instance;
1791
1792         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1793                 cp.primary_phy = HCI_ADV_PHY_1M;
1794                 cp.secondary_phy = HCI_ADV_PHY_2M;
1795         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1796                 cp.primary_phy = HCI_ADV_PHY_CODED;
1797                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1798         } else {
1799                 /* In all other cases use 1M */
1800                 cp.primary_phy = HCI_ADV_PHY_1M;
1801                 cp.secondary_phy = HCI_ADV_PHY_1M;
1802         }
1803
1804         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1805
1806         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1807             bacmp(&random_addr, BDADDR_ANY)) {
1808                 struct hci_cp_le_set_adv_set_rand_addr cp;
1809
1810                 /* Check if the random address needs to be updated */
1811                 if (adv_instance) {
1812                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1813                                 return 0;
1814                 } else {
1815                         if (!bacmp(&random_addr, &hdev->random_addr))
1816                                 return 0;
1817                         /* Instance 0x00 doesn't have an adv_info; instead it
1818                          * uses hdev->random_addr to track its address, so
1819                          * whenever it needs to be updated this also sets the
1820                          * random address, since hdev->random_addr is shared
1821                          * with the scan state machine.
1822                          */
1823                         set_random_addr(req, &random_addr);
1824                 }
1825
1826                 memset(&cp, 0, sizeof(cp));
1827
1828                 cp.handle = instance;
1829                 bacpy(&cp.bdaddr, &random_addr);
1830
1831                 hci_req_add(req,
1832                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1833                             sizeof(cp), &cp);
1834         }
1835
1836         return 0;
1837 }
1838
1839 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1840 {
1841         struct hci_dev *hdev = req->hdev;
1842         struct hci_cp_le_set_ext_adv_enable *cp;
1843         struct hci_cp_ext_adv_set *adv_set;
1844         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1845         struct adv_info *adv_instance;
1846
1847         if (instance > 0) {
1848                 adv_instance = hci_find_adv_instance(hdev, instance);
1849                 if (!adv_instance)
1850                         return -EINVAL;
1851         } else {
1852                 adv_instance = NULL;
1853         }
1854
1855         cp = (void *) data;
1856         adv_set = (void *) cp->data;
1857
1858         memset(cp, 0, sizeof(*cp));
1859
1860         cp->enable = 0x01;
1861         cp->num_of_sets = 0x01;
1862
1863         memset(adv_set, 0, sizeof(*adv_set));
1864
1865         adv_set->handle = instance;
1866
1867         /* Set duration per instance since controller is responsible for
1868          * scheduling it.
1869          */
1870         if (adv_instance && adv_instance->duration) {
1871                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1872
1873                 /* Time = N * 10 ms */
1874                 adv_set->duration = cpu_to_le16(duration / 10);
1875         }
1876
1877         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1878                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1879                     data);
1880
1881         return 0;
1882 }
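
/* Worked example, not part of the original file: with an assumed
 * adv_instance->timeout of 5 seconds, duration is 5 * MSEC_PER_SEC =
 * 5000 ms, and adv_set->duration is set to 5000 / 10 = 500, i.e.
 * N = 500 units of 10 ms each.
 */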
1883
1884 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1885 {
1886         struct hci_dev *hdev = req->hdev;
1887         struct hci_cp_le_set_ext_adv_enable *cp;
1888         struct hci_cp_ext_adv_set *adv_set;
1889         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1890         u8 req_size;
1891
1892         /* If request specifies an instance that doesn't exist, fail */
1893         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1894                 return -EINVAL;
1895
1896         memset(data, 0, sizeof(data));
1897
1898         cp = (void *)data;
1899         adv_set = (void *)cp->data;
1900
1901         /* Instance 0x00 indicates all advertising instances will be disabled */
1902         cp->num_of_sets = !!instance;
1903         cp->enable = 0x00;
1904
1905         adv_set->handle = instance;
1906
1907         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1908         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1909
1910         return 0;
1911 }
1912
1913 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1914 {
1915         struct hci_dev *hdev = req->hdev;
1916
1917         /* If request specifies an instance that doesn't exist, fail */
1918         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1919                 return -EINVAL;
1920
1921         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1922
1923         return 0;
1924 }
1925
1926 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1927 {
1928         struct hci_dev *hdev = req->hdev;
1929         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1930         int err;
1931
1932         /* If the instance isn't pending, the controller knows about it,
1933          * and it is safe to disable it first.
1934          */
1935         if (adv_instance && !adv_instance->pending)
1936                 __hci_req_disable_ext_adv_instance(req, instance);
1937
1938         err = __hci_req_setup_ext_adv_instance(req, instance);
1939         if (err < 0)
1940                 return err;
1941
1942         __hci_req_update_scan_rsp_data(req, instance);
1943         __hci_req_enable_ext_advertising(req, instance);
1944
1945         return 0;
1946 }
1947
1948 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1949                                     bool force)
1950 {
1951         struct hci_dev *hdev = req->hdev;
1952         struct adv_info *adv_instance = NULL;
1953         u16 timeout;
1954
1955         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1956             list_empty(&hdev->adv_instances))
1957                 return -EPERM;
1958
1959         if (hdev->adv_instance_timeout)
1960                 return -EBUSY;
1961
1962         adv_instance = hci_find_adv_instance(hdev, instance);
1963         if (!adv_instance)
1964                 return -ENOENT;
1965
1966         /* A zero timeout means unlimited advertising. As long as there is
1967          * only one instance, duration should be ignored. We still set a timeout
1968          * in case further instances are being added later on.
1969          *
1970          * If the remaining lifetime of the instance is more than the duration
1971          * then the timeout corresponds to the duration, otherwise it will be
1972          * reduced to the remaining instance lifetime.
1973          */
1974         if (adv_instance->timeout == 0 ||
1975             adv_instance->duration <= adv_instance->remaining_time)
1976                 timeout = adv_instance->duration;
1977         else
1978                 timeout = adv_instance->remaining_time;
1979
1980         /* The remaining time is being reduced unless the instance is being
1981          * advertised without time limit.
1982          */
1983         if (adv_instance->timeout)
1984                 adv_instance->remaining_time =
1985                                 adv_instance->remaining_time - timeout;
1986
1987         /* Only use work for scheduling instances with legacy advertising */
1988         if (!ext_adv_capable(hdev)) {
1989                 hdev->adv_instance_timeout = timeout;
1990                 queue_delayed_work(hdev->req_workqueue,
1991                            &hdev->adv_instance_expire,
1992                            msecs_to_jiffies(timeout * 1000));
1993         }
1994
1995         /* If we're just re-scheduling the same instance again then do not
1996          * execute any HCI commands. This happens when a single instance is
1997          * being advertised.
1998          */
1999         if (!force && hdev->cur_adv_instance == instance &&
2000             hci_dev_test_flag(hdev, HCI_LE_ADV))
2001                 return 0;
2002
2003         hdev->cur_adv_instance = instance;
2004         if (ext_adv_capable(hdev)) {
2005                 __hci_req_start_ext_adv(req, instance);
2006         } else {
2007                 __hci_req_update_adv_data(req, instance);
2008                 __hci_req_update_scan_rsp_data(req, instance);
2009                 __hci_req_enable_advertising(req);
2010         }
2011
2012         return 0;
2013 }
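
/* Worked example, not part of the original file: assume an instance
 * with timeout 30 s, duration 10 s and remaining_time 4 s. Since
 * duration (10) is greater than remaining_time (4), timeout becomes
 * 4 s and remaining_time drops to 0, so the instance becomes eligible
 * for removal when that timeout expires.
 */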
2014
2015 /* For a single instance:
2016  * - force == true: The instance will be removed even when its remaining
2017  *   lifetime is not zero.
2018  * - force == false: the instance will be deactivated but kept stored unless
2019  *   the remaining lifetime is zero.
2020  *
2021  * For instance == 0x00:
2022  * - force == true: All instances will be removed regardless of their timeout
2023  *   setting.
2024  * - force == false: Only instances that have a timeout will be removed.
2025  */
2026 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
2027                                 struct hci_request *req, u8 instance,
2028                                 bool force)
2029 {
2030         struct adv_info *adv_instance, *n, *next_instance = NULL;
2031         int err;
2032         u8 rem_inst;
2033
2034         /* Cancel any timeout concerning the removed instance(s). */
2035         if (!instance || hdev->cur_adv_instance == instance)
2036                 cancel_adv_timeout(hdev);
2037
2038         /* Get the next instance to advertise BEFORE we remove
2039          * the current one. This can be the same instance again
2040          * if there is only one instance.
2041          */
2042         if (instance && hdev->cur_adv_instance == instance)
2043                 next_instance = hci_get_next_instance(hdev, instance);
2044
2045         if (instance == 0x00) {
2046                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2047                                          list) {
2048                         if (!(force || adv_instance->timeout))
2049                                 continue;
2050
2051                         rem_inst = adv_instance->instance;
2052                         err = hci_remove_adv_instance(hdev, rem_inst);
2053                         if (!err)
2054                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2055                 }
2056         } else {
2057                 adv_instance = hci_find_adv_instance(hdev, instance);
2058
2059                 if (force || (adv_instance && adv_instance->timeout &&
2060                               !adv_instance->remaining_time)) {
2061                         /* Don't advertise a removed instance. */
2062                         if (next_instance &&
2063                             next_instance->instance == instance)
2064                                 next_instance = NULL;
2065
2066                         err = hci_remove_adv_instance(hdev, instance);
2067                         if (!err)
2068                                 mgmt_advertising_removed(sk, hdev, instance);
2069                 }
2070         }
2071
2072         if (!req || !hdev_is_powered(hdev) ||
2073             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2074                 return;
2075
2076         if (next_instance && !ext_adv_capable(hdev))
2077                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2078                                                 false);
2079 }
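
/* Illustrative sketch, not part of the original file: one way a caller
 * might remove all advertising instances unconditionally (instance
 * 0x00 with force == true):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_clear_adv_instance(hdev, NULL, &req, 0x00, true);
 *	hci_req_run(&req, NULL);
 */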
2080
2081 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2082                               bool use_rpa, u8 *own_addr_type)
2083 {
2084         struct hci_dev *hdev = req->hdev;
2085         int err;
2086
2087         /* If privacy is enabled, use a resolvable private address. If the
2088          * current RPA has expired, or something other than the current
2089          * RPA is in use, generate a new one.
2090          */
2091         if (use_rpa) {
2092                 /* If the controller supports LL Privacy, use own address
2093                  * type 0x03 (resolvable private address).
2094                  */
2095                 if (use_ll_privacy(hdev) &&
2096                     hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
2097                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
2098                 else
2099                         *own_addr_type = ADDR_LE_DEV_RANDOM;
2100
2101                 if (rpa_valid(hdev))
2102                         return 0;
2103
2104                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2105                 if (err < 0) {
2106                         bt_dev_err(hdev, "failed to generate new RPA");
2107                         return err;
2108                 }
2109
2110                 set_random_addr(req, &hdev->rpa);
2111
2112                 return 0;
2113         }
2114
2115         /* In case privacy is required but no resolvable private address
2116          * is available, use a non-resolvable private address. This is
2117          * useful for active scanning and non-connectable advertising.
2118          */
2119         if (require_privacy) {
2120                 bdaddr_t nrpa;
2121
2122                 while (true) {
2123                         /* The non-resolvable private address is generated
2124                          * from six random bytes with the two most
2125                          * significant bits cleared.
2126                          */
2127                         get_random_bytes(&nrpa, 6);
2128                         nrpa.b[5] &= 0x3f;
2129
2130                         /* The non-resolvable private address shall not be
2131                          * equal to the public address.
2132                          */
2133                         if (bacmp(&hdev->bdaddr, &nrpa))
2134                                 break;
2135                 }
2136
2137                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2138                 set_random_addr(req, &nrpa);
2139                 return 0;
2140         }
2141
2142         /* If forcing static address is in use or there is no public
2143          * address, use the static address as the random address (but skip
2144          * the HCI command if the current random address is already the
2145          * static one).
2146          *
2147          * In case BR/EDR has been disabled on a dual-mode controller
2148          * and a static address has been configured, then use that
2149          * address instead of the public BR/EDR address.
2150          */
2151         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2152             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2153             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2154              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2155                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2156                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2157                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2158                                     &hdev->static_addr);
2159                 return 0;
2160         }
2161
2162         /* Neither privacy nor static address is being used so use a
2163          * public address.
2164          */
2165         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2166
2167         return 0;
2168 }
2169
2170 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
2171 {
2172         struct bdaddr_list *b;
2173
2174         list_for_each_entry(b, &hdev->accept_list, list) {
2175                 struct hci_conn *conn;
2176
2177                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2178                 if (!conn)
2179                         return true;
2180
2181                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2182                         return true;
2183         }
2184
2185         return false;
2186 }
2187
2188 void __hci_req_update_scan(struct hci_request *req)
2189 {
2190         struct hci_dev *hdev = req->hdev;
2191         u8 scan;
2192
2193         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2194                 return;
2195
2196         if (!hdev_is_powered(hdev))
2197                 return;
2198
2199         if (mgmt_powering_down(hdev))
2200                 return;
2201
2202         if (hdev->scanning_paused)
2203                 return;
2204
2205         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2206             disconnected_accept_list_entries(hdev))
2207                 scan = SCAN_PAGE;
2208         else
2209                 scan = SCAN_DISABLED;
2210
2211         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2212                 scan |= SCAN_INQUIRY;
2213
2214         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2215             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2216                 return;
2217
2218         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2219 }
2220
2221 static int update_scan(struct hci_request *req, unsigned long opt)
2222 {
2223         hci_dev_lock(req->hdev);
2224         __hci_req_update_scan(req);
2225         hci_dev_unlock(req->hdev);
2226         return 0;
2227 }
2228
2229 static void scan_update_work(struct work_struct *work)
2230 {
2231         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2232
2233         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2234 }
2235
2236 static int connectable_update(struct hci_request *req, unsigned long opt)
2237 {
2238         struct hci_dev *hdev = req->hdev;
2239
2240         hci_dev_lock(hdev);
2241
2242         __hci_req_update_scan(req);
2243
2244         /* If BR/EDR is not enabled and we disable advertising as a
2245          * by-product of disabling connectable, we need to update the
2246          * advertising flags.
2247          */
2248         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2249                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2250
2251         /* Update the advertising parameters if necessary */
2252         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2253             !list_empty(&hdev->adv_instances)) {
2254                 if (ext_adv_capable(hdev))
2255                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2256                 else
2257                         __hci_req_enable_advertising(req);
2258         }
2259
2260         __hci_update_background_scan(req);
2261
2262         hci_dev_unlock(hdev);
2263
2264         return 0;
2265 }
2266
2267 static void connectable_update_work(struct work_struct *work)
2268 {
2269         struct hci_dev *hdev = container_of(work, struct hci_dev,
2270                                             connectable_update);
2271         u8 status;
2272
2273         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2274         mgmt_set_connectable_complete(hdev, status);
2275 }
2276
2277 static u8 get_service_classes(struct hci_dev *hdev)
2278 {
2279         struct bt_uuid *uuid;
2280         u8 val = 0;
2281
2282         list_for_each_entry(uuid, &hdev->uuids, list)
2283                 val |= uuid->svc_hint;
2284
2285         return val;
2286 }
2287
2288 void __hci_req_update_class(struct hci_request *req)
2289 {
2290         struct hci_dev *hdev = req->hdev;
2291         u8 cod[3];
2292
2293         bt_dev_dbg(hdev, "");
2294
2295         if (!hdev_is_powered(hdev))
2296                 return;
2297
2298         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2299                 return;
2300
2301         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2302                 return;
2303
2304         cod[0] = hdev->minor_class;
2305         cod[1] = hdev->major_class;
2306         cod[2] = get_service_classes(hdev);
2307
2308         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2309                 cod[1] |= 0x20;
2310
2311         if (memcmp(cod, hdev->dev_class, 3) == 0)
2312                 return;
2313
2314         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2315 }
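
/* Worked example, not part of the original file: with an assumed major
 * class 0x01 (computer) and minor class octet 0x0c (laptop), cod[]
 * becomes { 0x0c, 0x01, <service classes> }. Limited discoverable mode
 * sets bit 5 of cod[1] (bit 13 of the 24-bit Class of Device), turning
 * 0x01 into 0x21.
 */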
2316
2317 static void write_iac(struct hci_request *req)
2318 {
2319         struct hci_dev *hdev = req->hdev;
2320         struct hci_cp_write_current_iac_lap cp;
2321
2322         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2323                 return;
2324
2325         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2326                 /* Limited discoverable mode */
2327                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2328                 cp.iac_lap[0] = 0x00;   /* LIAC */
2329                 cp.iac_lap[1] = 0x8b;
2330                 cp.iac_lap[2] = 0x9e;
2331                 cp.iac_lap[3] = 0x33;   /* GIAC */
2332                 cp.iac_lap[4] = 0x8b;
2333                 cp.iac_lap[5] = 0x9e;
2334         } else {
2335                 /* General discoverable mode */
2336                 cp.num_iac = 1;
2337                 cp.iac_lap[0] = 0x33;   /* GIAC */
2338                 cp.iac_lap[1] = 0x8b;
2339                 cp.iac_lap[2] = 0x9e;
2340         }
2341
2342         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2343                     (cp.num_iac * 3) + 1, &cp);
2344 }
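
/* Worked example, not part of the original file: the IAC LAPs above are
 * stored little-endian, so the bytes 0x33, 0x8b, 0x9e encode the GIAC
 * LAP 0x9e8b33 and the bytes 0x00, 0x8b, 0x9e encode the LIAC LAP
 * 0x9e8b00.
 */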
2345
2346 static int discoverable_update(struct hci_request *req, unsigned long opt)
2347 {
2348         struct hci_dev *hdev = req->hdev;
2349
2350         hci_dev_lock(hdev);
2351
2352         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2353                 write_iac(req);
2354                 __hci_req_update_scan(req);
2355                 __hci_req_update_class(req);
2356         }
2357
2358         /* Advertising instances don't use the global discoverable setting, so
2359          * only update AD if advertising was enabled using Set Advertising.
2360          */
2361         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2362                 __hci_req_update_adv_data(req, 0x00);
2363
2364                 /* Discoverable mode affects the local advertising
2365                  * address in limited privacy mode.
2366                  */
2367                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2368                         if (ext_adv_capable(hdev))
2369                                 __hci_req_start_ext_adv(req, 0x00);
2370                         else
2371                                 __hci_req_enable_advertising(req);
2372                 }
2373         }
2374
2375         hci_dev_unlock(hdev);
2376
2377         return 0;
2378 }
2379
2380 static void discoverable_update_work(struct work_struct *work)
2381 {
2382         struct hci_dev *hdev = container_of(work, struct hci_dev,
2383                                             discoverable_update);
2384         u8 status;
2385
2386         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2387         mgmt_set_discoverable_complete(hdev, status);
2388 }
2389
2390 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2391                       u8 reason)
2392 {
2393         switch (conn->state) {
2394         case BT_CONNECTED:
2395         case BT_CONFIG:
2396                 if (conn->type == AMP_LINK) {
2397                         struct hci_cp_disconn_phy_link cp;
2398
2399                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2400                         cp.reason = reason;
2401                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2402                                     &cp);
2403                 } else {
2404                         struct hci_cp_disconnect dc;
2405
2406                         dc.handle = cpu_to_le16(conn->handle);
2407                         dc.reason = reason;
2408                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2409                 }
2410
2411                 conn->state = BT_DISCONN;
2412
2413                 break;
2414         case BT_CONNECT:
2415                 if (conn->type == LE_LINK) {
2416                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2417                                 break;
2418                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2419                                     0, NULL);
2420                 } else if (conn->type == ACL_LINK) {
2421                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2422                                 break;
2423                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2424                                     6, &conn->dst);
2425                 }
2426                 break;
2427         case BT_CONNECT2:
2428                 if (conn->type == ACL_LINK) {
2429                         struct hci_cp_reject_conn_req rej;
2430
2431                         bacpy(&rej.bdaddr, &conn->dst);
2432                         rej.reason = reason;
2433
2434                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2435                                     sizeof(rej), &rej);
2436                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2437                         struct hci_cp_reject_sync_conn_req rej;
2438
2439                         bacpy(&rej.bdaddr, &conn->dst);
2440
2441                         /* SCO rejection has its own limited set of
2442                          * allowed error values (0x0D-0x0F) which isn't
2443                          * compatible with most values passed to this
2444                          * function. To be safe, hard-code one of the
2445                          * values that's suitable for SCO.
2446                          */
2447                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2448
2449                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2450                                     sizeof(rej), &rej);
2451                 }
2452                 break;
2453         default:
2454                 conn->state = BT_CLOSED;
2455                 break;
2456         }
2457 }
2458
2459 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2460 {
2461         if (status)
2462                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2463 }
2464
2465 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2466 {
2467         struct hci_request req;
2468         int err;
2469
2470         hci_req_init(&req, conn->hdev);
2471
2472         __hci_abort_conn(&req, conn, reason);
2473
2474         err = hci_req_run(&req, abort_conn_complete);
2475         if (err && err != -ENODATA) {
2476                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2477                 return err;
2478         }
2479
2480         return 0;
2481 }
2482
2483 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2484 {
2485         hci_dev_lock(req->hdev);
2486         __hci_update_background_scan(req);
2487         hci_dev_unlock(req->hdev);
2488         return 0;
2489 }
2490
2491 static void bg_scan_update(struct work_struct *work)
2492 {
2493         struct hci_dev *hdev = container_of(work, struct hci_dev,
2494                                             bg_scan_update);
2495         struct hci_conn *conn;
2496         u8 status;
2497         int err;
2498
2499         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2500         if (!err)
2501                 return;
2502
2503         hci_dev_lock(hdev);
2504
2505         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2506         if (conn)
2507                 hci_le_conn_failed(conn, status);
2508
2509         hci_dev_unlock(hdev);
2510 }
2511
2512 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2513 {
2514         hci_req_add_le_scan_disable(req, false);
2515         return 0;
2516 }
2517
2518 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2519 {
2520         u8 length = opt;
2521         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2522         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2523         struct hci_cp_inquiry cp;
2524
2525         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2526                 return 0;
2527
2528         bt_dev_dbg(req->hdev, "");
2529
2530         hci_dev_lock(req->hdev);
2531         hci_inquiry_cache_flush(req->hdev);
2532         hci_dev_unlock(req->hdev);
2533
2534         memset(&cp, 0, sizeof(cp));
2535
2536         if (req->hdev->discovery.limited)
2537                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2538         else
2539                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2540
2541         cp.length = length;
2542
2543         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2544
2545         return 0;
2546 }
2547
2548 static void le_scan_disable_work(struct work_struct *work)
2549 {
2550         struct hci_dev *hdev = container_of(work, struct hci_dev,
2551                                             le_scan_disable.work);
2552         u8 status;
2553
2554         bt_dev_dbg(hdev, "");
2555
2556         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2557                 return;
2558
2559         cancel_delayed_work(&hdev->le_scan_restart);
2560
2561         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2562         if (status) {
2563                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2564                            status);
2565                 return;
2566         }
2567
2568         hdev->discovery.scan_start = 0;
2569
2570         /* If we were running LE only scan, change discovery state. If
2571          * we were running both LE and BR/EDR inquiry simultaneously,
2572          * and BR/EDR inquiry is already finished, stop discovery,
2573          * otherwise BR/EDR inquiry will stop discovery when finished.
2574          * If we are going to resolve a remote device name, do not
2575          * change the discovery state.
2576          */
2577
2578         if (hdev->discovery.type == DISCOV_TYPE_LE)
2579                 goto discov_stopped;
2580
2581         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2582                 return;
2583
2584         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2585                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2586                     hdev->discovery.state != DISCOVERY_RESOLVING)
2587                         goto discov_stopped;
2588
2589                 return;
2590         }
2591
2592         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2593                      HCI_CMD_TIMEOUT, &status);
2594         if (status) {
2595                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2596                 goto discov_stopped;
2597         }
2598
2599         return;
2600
2601 discov_stopped:
2602         hci_dev_lock(hdev);
2603         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2604         hci_dev_unlock(hdev);
2605 }
2606
2607 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2608 {
2609         struct hci_dev *hdev = req->hdev;
2610
2611         /* If controller is not scanning we are done. */
2612         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2613                 return 0;
2614
2615         if (hdev->scanning_paused) {
2616                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2617                 return 0;
2618         }
2619
2620         hci_req_add_le_scan_disable(req, false);
2621
2622         if (use_ext_scan(hdev)) {
2623                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2624
2625                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2626                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2627                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2628
2629                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2630                             sizeof(ext_enable_cp), &ext_enable_cp);
2631         } else {
2632                 struct hci_cp_le_set_scan_enable cp;
2633
2634                 memset(&cp, 0, sizeof(cp));
2635                 cp.enable = LE_SCAN_ENABLE;
2636                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2637                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2638         }
2639
2640         return 0;
2641 }
2642
2643 static void le_scan_restart_work(struct work_struct *work)
2644 {
2645         struct hci_dev *hdev = container_of(work, struct hci_dev,
2646                                             le_scan_restart.work);
2647         unsigned long timeout, duration, scan_start, now;
2648         u8 status;
2649
2650         bt_dev_dbg(hdev, "");
2651
2652         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2653         if (status) {
2654                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2655                            status);
2656                 return;
2657         }
2658
2659         hci_dev_lock(hdev);
2660
2661         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2662             !hdev->discovery.scan_start)
2663                 goto unlock;
2664
2665         /* When the scan was started, hdev->le_scan_disable was queued to
2666          * run 'duration' after scan_start. During the scan restart this
2667          * work has been canceled, so queue it again with the proper
2668          * timeout to make sure the scan does not run indefinitely.
2669          */
2670         duration = hdev->discovery.scan_duration;
2671         scan_start = hdev->discovery.scan_start;
2672         now = jiffies;
2673         if (now - scan_start <= duration) {
2674                 int elapsed;
2675
2676                 if (now >= scan_start)
2677                         elapsed = now - scan_start;
2678                 else
2679                         elapsed = ULONG_MAX - scan_start + now;
2680
2681                 timeout = duration - elapsed;
2682         } else {
2683                 timeout = 0;
2684         }
2685
2686         queue_delayed_work(hdev->req_workqueue,
2687                            &hdev->le_scan_disable, timeout);
2688
2689 unlock:
2690         hci_dev_unlock(hdev);
2691 }
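
/* Worked example, not part of the original file: assume a scan
 * duration of 10240 jiffies, scan_start == ULONG_MAX - 99 and
 * now == 100 after a jiffies wrap. The unsigned difference
 * now - scan_start (200) is within duration, and since now < scan_start
 * the wrap branch computes elapsed = ULONG_MAX - scan_start + now = 199,
 * so le_scan_disable is re-queued after duration - elapsed = 10041
 * jiffies.
 */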
2692
2693 static int active_scan(struct hci_request *req, unsigned long opt)
2694 {
2695         uint16_t interval = opt;
2696         struct hci_dev *hdev = req->hdev;
2697         u8 own_addr_type;
2698         /* Accept list is not used for discovery */
2699         u8 filter_policy = 0x00;
2700         /* Default is to enable duplicates filter */
2701         u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2702         /* Discovery doesn't require controller address resolution */
2703         bool addr_resolv = false;
2704         int err;
2705
2706         bt_dev_dbg(hdev, "");
2707
2708         /* If controller is scanning, it means the background scanning is
2709          * running. Thus, we should temporarily stop it in order to set the
2710          * discovery scanning parameters.
2711          */
2712         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2713                 hci_req_add_le_scan_disable(req, false);
2714                 cancel_interleave_scan(hdev);
2715         }
2716
2717         /* All active scans will be done with either a resolvable private
2718          * address (when privacy feature has been enabled) or non-resolvable
2719          * private address.
2720          */
2721         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2722                                         &own_addr_type);
2723         if (err < 0)
2724                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2725
2726         if (hci_is_adv_monitoring(hdev)) {
2727                 /* The duplicate filter should be disabled when an
2728                  * advertisement monitor is activated; otherwise AdvMon can
2729                  * only receive one advertisement per peer (*) during active
2730                  * scanning, and might report loss to these peers.
2731                  *
2732                  * (*) Different controllers have different meanings of
2733                  * |duplicate|. Some consider packets with the same address
2734                  * as duplicates, while others consider packets with the same
2735                  * address and the same RSSI as duplicates. Although in the
2736                  * latter case we don't strictly need to disable the duplicate
2737                  * filter, active scanning usually runs only for a short
2738                  * period of time, so the power impact should be negligible.
2739                  */
2740                 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2741         }
2742
2743         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2744                            hdev->le_scan_window_discovery, own_addr_type,
2745                            filter_policy, filter_dup, addr_resolv);
2746         return 0;
2747 }
2748
2749 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2750 {
2751         int err;
2752
2753         bt_dev_dbg(req->hdev, "");
2754
2755         err = active_scan(req, opt);
2756         if (err)
2757                 return err;
2758
2759         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2760 }
2761
2762 static void start_discovery(struct hci_dev *hdev, u8 *status)
2763 {
2764         unsigned long timeout;
2765
2766         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2767
2768         switch (hdev->discovery.type) {
2769         case DISCOV_TYPE_BREDR:
2770                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2771                         hci_req_sync(hdev, bredr_inquiry,
2772                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2773                                      status);
2774                 return;
2775         case DISCOV_TYPE_INTERLEAVED:
2776                 /* When running simultaneous discovery, the LE scanning time
2777                  * should occupy the whole discovery time since BR/EDR inquiry
2778                  * and LE scanning are scheduled by the controller.
2779                  *
2780                  * For interleaving discovery in comparison, BR/EDR inquiry
2781                  * and LE scanning are done sequentially with separate
2782                  * timeouts.
2783                  */
2784                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2785                              &hdev->quirks)) {
2786                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2787                         /* During simultaneous discovery, we double LE scan
2788                          * interval. We must leave some time for the controller
2789                          * to do BR/EDR inquiry.
2790                          */
2791                         hci_req_sync(hdev, interleaved_discov,
2792                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2793                                      status);
2794                         break;
2795                 }
2796
2797                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2798                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2799                              HCI_CMD_TIMEOUT, status);
2800                 break;
2801         case DISCOV_TYPE_LE:
2802                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2803                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2804                              HCI_CMD_TIMEOUT, status);
2805                 break;
2806         default:
2807                 *status = HCI_ERROR_UNSPECIFIED;
2808                 return;
2809         }
2810
2811         if (*status)
2812                 return;
2813
2814         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2815
2816         /* When service discovery is used and the controller has a
2817          * strict duplicate filter, it is important to remember the
2818          * start and duration of the scan. This is required for
2819          * restarting scanning during the discovery phase.
2820          */
2821         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2822                      hdev->discovery.result_filtering) {
2823                 hdev->discovery.scan_start = jiffies;
2824                 hdev->discovery.scan_duration = timeout;
2825         }
2826
2827         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2828                            timeout);
2829 }
2830
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        bt_dev_dbg(hdev, "state %u", hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        cancel_delayed_work(&hdev->le_scan_restart);
                        hci_req_add_le_scan_disable(req, false);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req, false);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

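/* Completion callback for the data path configuration request; only
 * logs the command status.
 */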
static void config_data_path_complete(struct hci_dev *hdev, u8 status,
                                      u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

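/* Configure the controller data path for an (e)SCO codec: fetch the
 * vendor-specific codec configuration and data path id from the driver
 * and queue one HCI_CONFIGURE_DATA_PATH command per direction.
 */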
int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
{
        struct hci_request req;
        int err;
        __u8 vnd_len, *vnd_data = NULL;
        struct hci_op_configure_data_path *cmd = NULL;

        hci_req_init(&req, hdev);

        err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
                                          &vnd_data);
        if (err < 0)
                goto error;

        cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
        if (!cmd) {
                err = -ENOMEM;
                goto error;
        }

        err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
        if (err < 0)
                goto error;

        cmd->vnd_len = vnd_len;
        memcpy(cmd->vnd_data, vnd_data, vnd_len);

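        /* Direction 0x00 is the input (host to controller) data path */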
        cmd->direction = 0x00;
        hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

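        /* Direction 0x01 is the output (controller to host) data path */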
        cmd->direction = 0x01;
        hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);

        err = hci_req_run(&req, config_data_path_complete);

error:
        kfree(cmd);
        kfree(vnd_data);
        return err;
}

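/* hci_req_sync() callback that queues the commands to stop discovery
 * while holding the device lock.
 */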
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

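/* Work callback driving the discovery state machine: start or stop
 * discovery depending on the current state and report the result back
 * to the management interface.
 */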
static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

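/* Delayed work callback that runs when the discoverable timeout
 * expires and turns discoverable mode back off.
 */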
static void discov_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_off.work);

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        /* When the discoverable timeout triggers, make sure the limited
         * discoverable flag is cleared. Even if the timeout was started
         * for general discoverable mode, it is safe to clear the flag
         * unconditionally.
         */
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);

        hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
        mgmt_new_settings(hdev);
}

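/* Bring the controller in sync with the stored host settings after it
 * has been powered on: SSP and Secure Connections support, LE host
 * support, default advertising data, authentication, and the BR/EDR
 * scan, class, name and EIR settings.
 */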
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 link_sec;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            !lmp_host_ssp_capable(hdev)) {
                u8 mode = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

                if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
                        u8 support = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                                    sizeof(support), &support);
                }
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
            lmp_bredr_capable(hdev)) {
                struct hci_cp_write_le_host_supported cp;

                cp.le = 0x01;
                cp.simul = 0x00;

                /* Check first if we already have the right
                 * host state (host features set)
                 */
                if (cp.le != lmp_host_le_capable(hdev) ||
                    cp.simul != lmp_host_le_br_capable(hdev))
                        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
                                    sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                /* Make sure the controller has a good default for
                 * advertising data. This also applies to the case
                 * where BR/EDR was toggled during the AUTO_OFF phase.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
                    list_empty(&hdev->adv_instances)) {
                        int err;

                        if (ext_adv_capable(hdev)) {
                                err = __hci_req_setup_ext_adv_instance(req,
                                                                       0x00);
                                if (!err)
                                        __hci_req_update_scan_rsp_data(req,
                                                                       0x00);
                        } else {
                                err = 0;
                                __hci_req_update_adv_data(req, 0x00);
                                __hci_req_update_scan_rsp_data(req, 0x00);
                        }

                        if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
                                if (!ext_adv_capable(hdev))
                                        __hci_req_enable_advertising(req);
                                else if (!err)
                                        __hci_req_enable_ext_advertising(req,
                                                                         0x00);
                        }
                } else if (!list_empty(&hdev->adv_instances)) {
                        struct adv_info *adv_instance;

                        adv_instance = list_first_entry(&hdev->adv_instances,
                                                        struct adv_info, list);
                        __hci_req_schedule_adv_instance(req,
                                                        adv_instance->instance,
                                                        true);
                }
        }

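        /* Sync the authentication requirement with the controller */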
        link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);

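        /* Restore the BR/EDR specific settings */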
        if (lmp_bredr_capable(hdev)) {
                bool fast_conn = hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE);

                __hci_req_write_fast_connectable(req, fast_conn);
                __hci_req_update_scan(req);
                __hci_req_update_class(req);
                __hci_req_update_name(req);
                __hci_req_update_eir(req);
        }

        hci_dev_unlock(hdev);
        return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
        /* Register the available SMP channels (BR/EDR and LE) only when
         * successfully powering on the controller. This late
         * registration is required so that LE SMP can clearly decide if
         * the public address or static address is used.
         */
        smp_register(hdev);

        return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
                              NULL);
}

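/* Initialize the work items used by the HCI request machinery */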
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_WORK(&hdev->scan_update, scan_update_work);
        INIT_WORK(&hdev->connectable_update, connectable_update_work);
        INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
        INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
        INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
        INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

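/* Cancel any pending synchronous request and all work items
 * initialized in hci_request_setup().
 */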
void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_work_sync(&hdev->scan_update);
        cancel_work_sync(&hdev->connectable_update);
        cancel_work_sync(&hdev->discoverable_update);
        cancel_delayed_work_sync(&hdev->discov_off);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

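        /* If an advertising instance timeout is pending, cancel it and
         * clear the recorded timeout.
         */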
        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        cancel_interleave_scan(hdev);
}