/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"
#include "msft.h"
#include "eir.h"

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
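
/* Illustrative usage sketch (editor's addition, not from the original
 * file): a caller builds a request with the helpers above and submits
 * it asynchronously. The completion callback name is hypothetical.
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 *	{
 *		bt_dev_dbg(hdev, "status %u", status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	hci_req_run(&req, my_complete);
 */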

void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                           struct sk_buff *skb)
{
        bt_dev_dbg(hdev, "result 0x%2.2x", result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        bt_dev_dbg(hdev, "start");

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        bt_dev_dbg(hdev, "end: err %d", err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        /* Check the state after obtaining the lock to protect HCI_UP
         * against any races from hci_dev_do_close() when the controller
         * gets removed.
         */
        if (test_bit(HCI_UP, &hdev->flags))
                ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        else
                ret = -ENETDOWN;
        hci_req_sync_unlock(hdev);

        return ret;
}
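
/* Illustrative sketch (editor's addition, not from the original file):
 * hci_req_sync() takes a request-builder function; interleave_scan_work()
 * below uses it with hci_req_add_le_interleaved_scan(). A minimal builder
 * might look like this (the builder name is hypothetical):
 *
 *	static int scan_disable_req(struct hci_request *req,
 *				    unsigned long opt)
 *	{
 *		hci_req_add_le_scan_disable(req, false);
 *		return 0;
 *	}
 *
 *	u8 status;
 *
 *	hci_req_sync(hdev, scan_disable_req, 0, HCI_CMD_TIMEOUT, &status);
 */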

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        bt_dev_dbg(hdev, "skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}
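
/* Editor's note: the resulting skb carries a standard HCI command packet
 * (Core Spec Vol 4, Part E, Sec. 5.4.1):
 *
 *	+------------------------+------+--------------------+
 *	| opcode (2 octets, LE)  | plen | plen param octets  |
 *	+------------------------+------+--------------------+
 */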

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = hdev->def_page_scan_type;
                acp.interval = cpu_to_le16(hdev->def_page_scan_int);
        }

        acp.window = cpu_to_le16(hdev->def_page_scan_window);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
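
/* Editor's note: page scan interval/window values above are in 0.625 ms
 * slots (assuming standard BR/EDR timing units), so 0x0100 = 256 slots
 * = 160 ms, matching the comment in the function.
 */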

static void start_interleave_scan(struct hci_dev *hdev)
{
        hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
        queue_delayed_work(hdev->req_workqueue,
                           &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
        return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
        bt_dev_dbg(hdev, "cancelling interleave scan");

        cancel_delayed_work_sync(&hdev->interleave_scan);

        hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if this function starts the interleave scan, otherwise
 * return false.
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
        /* Do interleaved scan only if all of the following are true:
         * - There is at least one ADV monitor
         * - At least one pending LE connection or one device to be scanned for
         * - Monitor offloading is not supported
         * If so, we should alternate between allowlist scan and one without
         * any filters to save power.
         */
        bool use_interleaving = hci_is_adv_monitoring(hdev) &&
                                !(list_empty(&hdev->pend_le_conns) &&
                                  list_empty(&hdev->pend_le_reports)) &&
                                hci_get_adv_monitor_offload_ext(hdev) ==
                                    HCI_ADV_MONITOR_EXT_NONE;
        bool is_interleaving = is_interleave_scanning(hdev);

        if (use_interleaving && !is_interleaving) {
                start_interleave_scan(hdev);
                bt_dev_dbg(hdev, "starting interleave scan");
                return true;
        }

        if (!use_interleaving && is_interleaving)
                cancel_interleave_scan(hdev);

        return false;
}
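
/* Editor's note: the interleave scan rotates between two scanning
 * states, driven by interleave_scan_work() below:
 *
 *	ALLOWLIST --(advmon_allowlist_duration)--> NO_FILTER
 *	NO_FILTER --(advmon_no_filter_duration)--> ALLOWLIST
 */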

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        eir_create(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }

        /* Disable address resolution */
        if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
                __u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }
}

static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
                                 u8 bdaddr_type)
{
        struct hci_cp_le_del_from_accept_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(req->hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
                if (irk) {
                        struct hci_cp_le_del_from_resolv_list cp;

                        cp.bdaddr_type = bdaddr_type;
                        bacpy(&cp.bdaddr, bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }
}

/* Adds connection to accept list if needed. On error, returns -1. */
static int add_to_accept_list(struct hci_request *req,
                              struct hci_conn_params *params, u8 *num_entries,
                              bool allow_rpa)
{
        struct hci_cp_le_add_to_accept_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in accept list */
        if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Accept list full; have the caller select a filter policy that
         * accepts all advertising instead.
         */
        if (*num_entries >= hdev->le_accept_list_size)
                return -1;

        /* Accept list cannot be used with RPAs */
        if (!allow_rpa &&
            !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in accept list */
        if (hdev->suspended &&
            !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);

        if (use_ll_privacy(hdev)) {
                struct smp_irk *irk;

                irk = hci_find_irk_by_addr(hdev, &params->addr,
                                           params->addr_type);
                if (irk) {
                        struct hci_cp_le_add_to_resolv_list cp;

                        cp.bdaddr_type = params->addr_type;
                        bacpy(&cp.bdaddr, &params->addr);
                        memcpy(cp.peer_irk, irk->val, 16);

                        if (hci_dev_test_flag(hdev, HCI_PRIVACY))
                                memcpy(cp.local_irk, hdev->irk, 16);
                        else
                                memset(cp.local_irk, 0, 16);

                        hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST,
                                    sizeof(cp), &cp);
                }
        }

        return 0;
}

static u8 update_accept_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow usage of the accept list even with RPAs in suspend. In the
         * worst case, we won't be able to wake from devices that use the
         * Privacy 1.2 features. Additionally, once we support Privacy 1.2 and
         * IRK offloading, we can update this to also check for those
         * conditions.
         */
        bool allow_rpa = hdev->suspended;

        if (use_ll_privacy(hdev))
                allow_rpa = true;

        /* Go through the current accept list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_accept_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the accept list.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* Accept list cannot be used with RPAs */
                if (!allow_rpa &&
                    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid accept list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available accept list entries in the controller, then
         * just abort and return a filter policy value to not use the
         * accept list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * accept list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_accept_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Use the allowlist unless the following conditions are all true:
         * - We are not currently suspending
         * - There are 1 or more ADV monitors registered and it's not offloaded
         * - Interleaved scanning is not currently using the allowlist
         */
        if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
            hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
            hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
                return 0x00;

        /* Select filter policy to use accept list */
        return 0x01;
}
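
/* Editor's note: scanning filter policy values produced above (per the
 * Core Spec LE Set Scan Parameters definitions):
 *
 *	0x00	accept all advertising packets
 *	0x01	accept only packets from devices in the accept list
 *	0x02/0x03 same as 0x00/0x01, but also accept directed advertising
 *		from advertisers using RPAs (extended scanner filter
 *		policies, OR'ed in by hci_req_add_le_passive_scan() below)
 */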

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy,
                               bool filter_dup, bool addr_resolv)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ll_privacy(hdev) && addr_resolv) {
                u8 enable = 0x01;

                hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
        }

        /* Use extended scanning if both the set extended scan parameters
         * and set extended scan enable commands are supported.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = filter_dup;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = filter_dup;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

/* Returns true if an LE connection is in the scanning state */
static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn  *c;

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == LE_LINK && c->state == BT_CONNECT &&
                    test_bit(HCI_CONN_SCANNING, &c->flags)) {
                        rcu_read_unlock();
                        return true;
                }
        }

        rcu_read_unlock();

        return false;
}

/* Call hci_req_add_le_scan_disable() first to disable controller-based
 * address resolution before the resolving list can be reconfigured.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;
        /* Default is to enable duplicates filter */
        u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        /* Background scanning should run with address resolution */
        bool addr_resolv = true;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        if (hdev->enable_advmon_interleave_scan &&
            __hci_update_interleaved_scan(hdev))
                return;

        bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
        /* Adding or removing entries from the accept list must
         * happen before enabling scanning. The controller does
         * not allow accept list modification while scanning.
         */
        filter_policy = update_accept_list(req);

        /* When the controller uses random resolvable addresses with LE
         * privacy enabled, controllers that support the Extended Scanner
         * Filter Policies can also handle directed advertising.
         *
         * So instead of using filter policies 0x00 (no accept list)
         * and 0x01 (accept list enabled) use the new filter policies
         * 0x02 (no accept list) and 0x03 (accept list enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
        } else if (hci_is_adv_monitoring(hdev)) {
                window = hdev->le_scan_window_adv_monitor;
                interval = hdev->le_scan_int_adv_monitor;

                /* Disable the duplicates filter when scanning for an
                 * advertisement monitor for the following reasons.
                 *
                 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
                 * controllers ignore RSSI_Sampling_Period when the duplicates
                 * filter is enabled.
                 *
                 * For SW pattern filtering, when we're not doing interleaved
                 * scanning, it is necessary to disable the duplicates filter,
                 * otherwise hosts can only receive one advertisement and it's
                 * impossible to know if a peer is still in range.
                 */
                filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
                   filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy, filter_dup,
                           addr_resolv);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

/* This function requires the caller holds hdev->lock */
void __hci_req_pause_adv_instances(struct hci_request *req)
{
        bt_dev_dbg(req->hdev, "Pausing advertising instances");

        /* Call to disable any advertisements active on the controller.
         * This will succeed even if no advertisements are configured.
         */
        __hci_req_disable_advertising(req);

        /* If we are using software rotation, pause the loop */
        if (!ext_adv_capable(req->hdev))
                cancel_adv_timeout(req->hdev);
}

/* This function requires the caller holds hdev->lock */
static void __hci_req_resume_adv_instances(struct hci_request *req)
{
        struct adv_info *adv;

        bt_dev_dbg(req->hdev, "Resuming advertising instances");

        if (ext_adv_capable(req->hdev)) {
                /* Call for each tracked instance to be re-enabled */
                list_for_each_entry(adv, &req->hdev->adv_instances, list) {
                        __hci_req_enable_ext_advertising(req,
                                                         adv->instance);
                }

        } else {
                /* Schedule for most recent instance to be restarted and begin
                 * the software rotation loop
                 */
                __hci_req_schedule_adv_instance(req,
                                                req->hdev->cur_adv_instance,
                                                true);
        }
}

/* This function requires the caller holds hdev->lock */
int hci_req_resume_adv_instances(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_resume_adv_instances(&req);

        return hci_req_run(&req, NULL);
}

static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
        return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                __hci_req_disable_ext_adv_instance(req, 0x00);

        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}
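
/* Editor's summary of the decision above:
 *
 *	HCI_PRIVACY clear                          -> identity address
 *	HCI_PRIVACY set, HCI_LIMITED_PRIVACY clear -> RPA
 *	limited privacy + discoverable + bondable  -> identity address
 *	limited privacy otherwise                  -> RPA
 */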

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in peripheral role. */
        if (hdev->conn_hash.le_num_peripheral > 0) {
                /* Peripheral connection state and non connectable mode bit 20.
                 */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Peripheral connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in central role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
                /* Central connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Central connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}
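
/* Editor's note: the bit numbers in the comments above index the
 * controller's supported LE states mask; assuming 0-indexed bits as in
 * the Core Spec, bit N maps to le_states[N / 8] with mask (1 << (N % 8)),
 * e.g. bit 20 -> le_states[2] & 0x10 and bit 38 -> le_states[4] & 0x40.
 */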

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct adv_info *adv;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
        adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (adv) {
                adv_min_interval = adv->min_interval;
                adv_max_interval = adv->max_interval;
        } else {
                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        }

        if (connectable) {
                cp.type = LE_ADV_IND;
        } else {
                if (adv_cur_instance_is_scannable(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_scan_rsp_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_scan_rsp(hdev, instance, pdu.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(pdu.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, pdu.data, len);
                hdev->scan_rsp_data_len = len;

                pdu.cp.handle = instance;
                pdu.cp.length = len;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_scan_rsp(hdev, instance, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct {
                        struct hci_cp_le_set_ext_adv_data cp;
                        u8 data[HCI_MAX_EXT_AD_LENGTH];
                } pdu;

                memset(&pdu, 0, sizeof(pdu));

                len = eir_create_adv_data(hdev, instance, pdu.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(pdu.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, pdu.data, len);
                hdev->adv_data_len = len;

                pdu.cp.length = len;
                pdu.cp.handle = instance;
                pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
                            sizeof(pdu.cp) + len, &pdu.cp);
        } else {
                struct hci_cp_le_set_adv_data cp;

                memset(&cp, 0, sizeof(cp));

                len = eir_create_adv_data(hdev, instance, cp.data);

                /* There's nothing to do if the data hasn't changed */
                if (hdev->adv_data_len == len &&
                    memcmp(cp.data, hdev->adv_data, len) == 0)
                        return;

                memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
                hdev->adv_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
        }
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_req_update_adv_data(&req, instance);

        return hci_req_run(&req, NULL);
}

static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status,
                                            u16 opcode)
{
        BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_disable_address_resolution(struct hci_dev *hdev)
{
        struct hci_request req;
        __u8 enable = 0x00;

        if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
                return;

        hci_req_init(&req, hdev);

        hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);

        hci_req_run(&req, enable_addr_resolution_complete);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "status %u", status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
        struct hci_request req;

        if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
            list_empty(&hdev->adv_instances))
                return;

        hci_req_init(&req, hdev);

        if (hdev->cur_adv_instance) {
                __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
                                                true);
        } else {
                if (ext_adv_capable(hdev)) {
                        __hci_req_start_ext_adv(&req, 0x00);
                } else {
                        __hci_req_update_adv_data(&req, 0x00);
                        __hci_req_update_scan_rsp_data(&req, 0x00);
                        __hci_req_enable_advertising(&req);
                }
        }

        hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_instance_expire.work);

        struct hci_request req;
        u8 instance;

        bt_dev_dbg(hdev, "");

        hci_dev_lock(hdev);

        hdev->adv_instance_timeout = 0;

        instance = hdev->cur_adv_instance;
        if (instance == 0x00)
                goto unlock;

        hci_req_init(&req, hdev);

        hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

        if (list_empty(&hdev->adv_instances))
                __hci_req_disable_advertising(&req);

        hci_req_run(&req, NULL);

unlock:
        hci_dev_unlock(hdev);
}

static int hci_req_add_le_interleaved_scan(struct hci_request *req,
                                           unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        int ret = 0;

        hci_dev_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req, false);
        hci_req_add_le_passive_scan(req);

        switch (hdev->interleave_scan_state) {
        case INTERLEAVE_SCAN_ALLOWLIST:
                bt_dev_dbg(hdev, "next state: no filter");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
                break;
        case INTERLEAVE_SCAN_NO_FILTER:
                bt_dev_dbg(hdev, "next state: allowlist");
                hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
                break;
        case INTERLEAVE_SCAN_NONE:
                BT_ERR("unexpected error");
                ret = -1;
        }

        hci_dev_unlock(hdev);

        return ret;
}

static void interleave_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            interleave_scan.work);
        u8 status;
        unsigned long timeout;

        if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
                timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
        } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
                timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
        } else {
                bt_dev_err(hdev, "unexpected error");
                return;
        }

        hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0,
                     HCI_CMD_TIMEOUT, &status);

        /* Don't continue interleaving if it was canceled */
        if (is_interleave_scanning(hdev))
                queue_delayed_work(hdev->req_workqueue,
                                   &hdev->interleave_scan, timeout);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
                           bool use_rpa, struct adv_info *adv_instance,
                           u8 *own_addr_type, bdaddr_t *rand_addr)
{
        int err;

        bacpy(rand_addr, BDADDR_ANY);

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired then generate a new one.
         */
        if (use_rpa) {
                /* If the controller supports LL Privacy, use own address
                 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
                 */
                if (use_ll_privacy(hdev))
                        *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
                else
                        *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (adv_instance) {
                        if (adv_rpa_valid(adv_instance))
                                return 0;
                } else {
                        if (rpa_valid(hdev))
                                return 0;
                }

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        bt_dev_err(hdev, "failed to generate new RPA");
                        return err;
                }

                bacpy(rand_addr, &hdev->rpa);

                return 0;
        }

        /* In case privacy is required without a resolvable private
         * address, use a non-resolvable private address. This is useful
         * for non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                bacpy(rand_addr, &nrpa);

                return 0;
        }

        /* No privacy so use a public address. */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}
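
/* Editor's note on the NRPA generation above: bdaddr_t stores addresses
 * little-endian, so b[5] is the most significant octet; masking it with
 * 0x3f clears the two top bits that would otherwise mark the address as
 * resolvable or static.
 */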

void __hci_req_clear_ext_adv_sets(struct hci_request *req)
{
        hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                bt_dev_dbg(hdev, "Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
1381
1382 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1383 {
1384         struct hci_cp_le_set_ext_adv_params cp;
1385         struct hci_dev *hdev = req->hdev;
1386         bool connectable;
1387         u32 flags;
1388         bdaddr_t random_addr;
1389         u8 own_addr_type;
1390         int err;
1391         struct adv_info *adv_instance;
1392         bool secondary_adv;
1393
1394         if (instance > 0) {
1395                 adv_instance = hci_find_adv_instance(hdev, instance);
1396                 if (!adv_instance)
1397                         return -EINVAL;
1398         } else {
1399                 adv_instance = NULL;
1400         }
1401
1402         flags = hci_adv_instance_flags(hdev, instance);
1403
1404         /* If the "connectable" instance flag was not set, then choose between
1405          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1406          */
1407         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1408                       mgmt_get_connectable(hdev);
1409
1410         if (!is_advertising_allowed(hdev, connectable))
1411                 return -EPERM;
1412
1413         /* Set require_privacy to true only when non-connectable
1414          * advertising is used. In that case it is fine to use a
1415          * non-resolvable private address.
1416          */
1417         err = hci_get_random_address(hdev, !connectable,
1418                                      adv_use_rpa(hdev, flags), adv_instance,
1419                                      &own_addr_type, &random_addr);
1420         if (err < 0)
1421                 return err;
1422
1423         memset(&cp, 0, sizeof(cp));
1424
1425         if (adv_instance) {
1426                 hci_cpu_to_le24(adv_instance->min_interval, cp.min_interval);
1427                 hci_cpu_to_le24(adv_instance->max_interval, cp.max_interval);
1428                 cp.tx_power = adv_instance->tx_power;
1429         } else {
1430                 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1431                 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1432                 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1433         }
1434
1435         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1436
1437         if (connectable) {
1438                 if (secondary_adv)
1439                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1440                 else
1441                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1442         } else if (hci_adv_instance_is_scannable(hdev, instance) ||
1443                    (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1444                 if (secondary_adv)
1445                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1446                 else
1447                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1448         } else {
1449                 if (secondary_adv)
1450                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1451                 else
1452                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1453         }
1454
1455         cp.own_addr_type = own_addr_type;
1456         cp.channel_map = hdev->le_adv_channel_map;
1457         cp.handle = instance;
1458
1459         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1460                 cp.primary_phy = HCI_ADV_PHY_1M;
1461                 cp.secondary_phy = HCI_ADV_PHY_2M;
1462         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1463                 cp.primary_phy = HCI_ADV_PHY_CODED;
1464                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1465         } else {
1466                 /* In all other cases use 1M */
1467                 cp.primary_phy = HCI_ADV_PHY_1M;
1468                 cp.secondary_phy = HCI_ADV_PHY_1M;
1469         }
1470
1471         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1472
1473         if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1474              own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1475             bacmp(&random_addr, BDADDR_ANY)) {
1476                 struct hci_cp_le_set_adv_set_rand_addr cp;
1477
1478                 /* Check if the random address needs to be updated */
1479                 if (adv_instance) {
1480                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1481                                 return 0;
1482                 } else {
1483                         if (!bacmp(&random_addr, &hdev->random_addr))
1484                                 return 0;
1485                         /* Instance 0x00 doesn't have an adv_info; instead
1486                          * it uses hdev->random_addr to track its address,
1487                          * so whenever that address needs to be updated we
1488                          * also set the random address here, since it is
1489                          * shared with the scan state machine.
1490                          */
1491                         set_random_addr(req, &random_addr);
1492                 }
1493
1494                 memset(&cp, 0, sizeof(cp));
1495
1496                 cp.handle = instance;
1497                 bacpy(&cp.bdaddr, &random_addr);
1498
1499                 hci_req_add(req,
1500                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1501                             sizeof(cp), &cp);
1502         }
1503
1504         return 0;
1505 }
1506
1507 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1508 {
1509         struct hci_dev *hdev = req->hdev;
1510         struct hci_cp_le_set_ext_adv_enable *cp;
1511         struct hci_cp_ext_adv_set *adv_set;
1512         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1513         struct adv_info *adv_instance;
1514
1515         if (instance > 0) {
1516                 adv_instance = hci_find_adv_instance(hdev, instance);
1517                 if (!adv_instance)
1518                         return -EINVAL;
1519         } else {
1520                 adv_instance = NULL;
1521         }
1522
1523         cp = (void *) data;
1524         adv_set = (void *) cp->data;
1525
1526         memset(cp, 0, sizeof(*cp));
1527
1528         cp->enable = 0x01;
1529         cp->num_of_sets = 0x01;
1530
1531         memset(adv_set, 0, sizeof(*adv_set));
1532
1533         adv_set->handle = instance;
1534
1535         /* Set duration per instance since controller is responsible for
1536          * scheduling it.
1537          */
1538         if (adv_instance && adv_instance->duration) {
1539                 u16 duration = adv_instance->timeout * MSEC_PER_SEC;
1540
1541                 /* Time = N * 10 ms */
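                     /* For example, a 2 second instance timeout yields
                      * 2000 ms / 10 = 200 (0x00c8) in the duration field.
                      */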
1542                 adv_set->duration = cpu_to_le16(duration / 10);
1543         }
1544
1545         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1546                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1547                     data);
1548
1549         return 0;
1550 }
1551
1552 int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance)
1553 {
1554         struct hci_dev *hdev = req->hdev;
1555         struct hci_cp_le_set_ext_adv_enable *cp;
1556         struct hci_cp_ext_adv_set *adv_set;
1557         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1558         u8 req_size;
1559
1560         /* If request specifies an instance that doesn't exist, fail */
1561         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1562                 return -EINVAL;
1563
1564         memset(data, 0, sizeof(data));
1565
1566         cp = (void *)data;
1567         adv_set = (void *)cp->data;
1568
1569         /* Instance 0x00 indicates all advertising instances will be disabled */
1570         cp->num_of_sets = !!instance;
1571         cp->enable = 0x00;
1572
1573         adv_set->handle = instance;
1574
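             /* With instance 0x00 num_of_sets is zero, so only the header is
              * sent and the adv_set entry is left out of the request.
              */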
1575         req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets;
1576         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data);
1577
1578         return 0;
1579 }
1580
1581 int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance)
1582 {
1583         struct hci_dev *hdev = req->hdev;
1584
1585         /* If request specifies an instance that doesn't exist, fail */
1586         if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1587                 return -EINVAL;
1588
1589         hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance);
1590
1591         return 0;
1592 }
1593
1594 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance);
1598         int err;
1599
1600         /* If the instance isn't pending, the controller already knows
1601          * about it and it is safe to disable it first.
1602          */
1603         if (adv_instance && !adv_instance->pending)
1604                 __hci_req_disable_ext_adv_instance(req, instance);
1605
1606         err = __hci_req_setup_ext_adv_instance(req, instance);
1607         if (err < 0)
1608                 return err;
1609
1610         __hci_req_update_scan_rsp_data(req, instance);
1611         __hci_req_enable_ext_advertising(req, instance);
1612
1613         return 0;
1614 }
1615
1616 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1617                                     bool force)
1618 {
1619         struct hci_dev *hdev = req->hdev;
1620         struct adv_info *adv_instance = NULL;
1621         u16 timeout;
1622
1623         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1624             list_empty(&hdev->adv_instances))
1625                 return -EPERM;
1626
1627         if (hdev->adv_instance_timeout)
1628                 return -EBUSY;
1629
1630         adv_instance = hci_find_adv_instance(hdev, instance);
1631         if (!adv_instance)
1632                 return -ENOENT;
1633
1634         /* A zero timeout means unlimited advertising. As long as there is
1635          * only one instance, duration should be ignored. We still set a timeout
1636          * in case further instances are being added later on.
1637          *
1638          * If the remaining lifetime of the instance is more than the duration
1639          * then the timeout corresponds to the duration, otherwise it will be
1640          * reduced to the remaining instance lifetime.
1641          */
1642         if (adv_instance->timeout == 0 ||
1643             adv_instance->duration <= adv_instance->remaining_time)
1644                 timeout = adv_instance->duration;
1645         else
1646                 timeout = adv_instance->remaining_time;
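             /* Example: for an instance with a non-zero timeout, a 10 s
              * duration with 4 s of lifetime remaining yields a 4 s timeout,
              * while 15 s remaining yields the full 10 s duration.
              */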
1647
1648         /* The remaining time is being reduced unless the instance is being
1649          * advertised without time limit.
1650          */
1651         if (adv_instance->timeout)
1652                 adv_instance->remaining_time =
1653                                 adv_instance->remaining_time - timeout;
1654
1655         /* Only use the expire work to schedule instances when using legacy advertising */
1656         if (!ext_adv_capable(hdev)) {
1657                 hdev->adv_instance_timeout = timeout;
1658                 queue_delayed_work(hdev->req_workqueue,
1659                            &hdev->adv_instance_expire,
1660                            msecs_to_jiffies(timeout * 1000));
1661         }
1662
1663         /* If we're just re-scheduling the same instance again then do not
1664          * execute any HCI commands. This happens when a single instance is
1665          * being advertised.
1666          */
1667         if (!force && hdev->cur_adv_instance == instance &&
1668             hci_dev_test_flag(hdev, HCI_LE_ADV))
1669                 return 0;
1670
1671         hdev->cur_adv_instance = instance;
1672         if (ext_adv_capable(hdev)) {
1673                 __hci_req_start_ext_adv(req, instance);
1674         } else {
1675                 __hci_req_update_adv_data(req, instance);
1676                 __hci_req_update_scan_rsp_data(req, instance);
1677                 __hci_req_enable_advertising(req);
1678         }
1679
1680         return 0;
1681 }
1682
1683 /* For a single instance:
1684  * - force == true: The instance will be removed even when its remaining
1685  *   lifetime is not zero.
1686  * - force == false: The instance will be deactivated but kept stored unless
1687  *   the remaining lifetime is zero.
1688  *
1689  * For instance == 0x00:
1690  * - force == true: All instances will be removed regardless of their timeout
1691  *   setting.
1692  * - force == false: Only instances that have a timeout will be removed.
1693  */
1694 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1695                                 struct hci_request *req, u8 instance,
1696                                 bool force)
1697 {
1698         struct adv_info *adv_instance, *n, *next_instance = NULL;
1699         int err;
1700         u8 rem_inst;
1701
1702         /* Cancel any timeout concerning the removed instance(s). */
1703         if (!instance || hdev->cur_adv_instance == instance)
1704                 cancel_adv_timeout(hdev);
1705
1706         /* Get the next instance to advertise BEFORE we remove
1707          * the current one. This can be the same instance again
1708          * if there is only one instance.
1709          */
1710         if (instance && hdev->cur_adv_instance == instance)
1711                 next_instance = hci_get_next_instance(hdev, instance);
1712
1713         if (instance == 0x00) {
1714                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1715                                          list) {
1716                         if (!(force || adv_instance->timeout))
1717                                 continue;
1718
1719                         rem_inst = adv_instance->instance;
1720                         err = hci_remove_adv_instance(hdev, rem_inst);
1721                         if (!err)
1722                                 mgmt_advertising_removed(sk, hdev, rem_inst);
1723                 }
1724         } else {
1725                 adv_instance = hci_find_adv_instance(hdev, instance);
1726
1727                 if (force || (adv_instance && adv_instance->timeout &&
1728                               !adv_instance->remaining_time)) {
1729                         /* Don't advertise a removed instance. */
1730                         if (next_instance &&
1731                             next_instance->instance == instance)
1732                                 next_instance = NULL;
1733
1734                         err = hci_remove_adv_instance(hdev, instance);
1735                         if (!err)
1736                                 mgmt_advertising_removed(sk, hdev, instance);
1737                 }
1738         }
1739
1740         if (!req || !hdev_is_powered(hdev) ||
1741             hci_dev_test_flag(hdev, HCI_ADVERTISING))
1742                 return;
1743
1744         if (next_instance && !ext_adv_capable(hdev))
1745                 __hci_req_schedule_adv_instance(req, next_instance->instance,
1746                                                 false);
1747 }
1748
1749 int hci_update_random_address(struct hci_request *req, bool require_privacy,
1750                               bool use_rpa, u8 *own_addr_type)
1751 {
1752         struct hci_dev *hdev = req->hdev;
1753         int err;
1754
1755         /* If privacy is enabled use a resolvable private address. If
1756          * the current RPA has expired or something other than the
1757          * current RPA is in use, then generate a new one.
1758          */
1759         if (use_rpa) {
1760                 /* If the controller supports LL Privacy, use own address
1761                  * type 0x03, letting the controller generate the RPA.
1762                  */
1763                 if (use_ll_privacy(hdev))
1764                         *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1765                 else
1766                         *own_addr_type = ADDR_LE_DEV_RANDOM;
1767
1768                 if (rpa_valid(hdev))
1769                         return 0;
1770
1771                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1772                 if (err < 0) {
1773                         bt_dev_err(hdev, "failed to generate new RPA");
1774                         return err;
1775                 }
1776
1777                 set_random_addr(req, &hdev->rpa);
1778
1779                 return 0;
1780         }
1781
1782         /* In case of required privacy without resolvable private address,
1783          * use a non-resolvable private address. This is useful for active
1784          * scanning and non-connectable advertising.
1785          */
1786         if (require_privacy) {
1787                 bdaddr_t nrpa;
1788
1789                 while (true) {
1790                         /* The non-resolvable private address is generated
1791                          * from six random bytes with the two most significant
1792                          * bits cleared.
1793                          */
1794                         get_random_bytes(&nrpa, 6);
1795                         nrpa.b[5] &= 0x3f;
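                             /* bdaddr_t is stored little-endian, so b[5] is
                              * the most significant byte holding those bits.
                              */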
1796
1797                         /* The non-resolvable private address shall not be
1798                          * equal to the public address.
1799                          */
1800                         if (bacmp(&hdev->bdaddr, &nrpa))
1801                                 break;
1802                 }
1803
1804                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1805                 set_random_addr(req, &nrpa);
1806                 return 0;
1807         }
1808
1809         /* If forcing the static address is in use or there is no public
1810          * address, use the static address as the random address (but skip
1811          * the HCI command if the current random address is already the
1812          * static one).
1813          *
1814          * In case BR/EDR has been disabled on a dual-mode controller
1815          * and a static address has been configured, then use that
1816          * address instead of the public BR/EDR address.
1817          */
1818         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1819             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1820             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1821              bacmp(&hdev->static_addr, BDADDR_ANY))) {
1822                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1823                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1824                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1825                                     &hdev->static_addr);
1826                 return 0;
1827         }
1828
1829         /* Neither privacy nor static address is being used so use a
1830          * public address.
1831          */
1832         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1833
1834         return 0;
1835 }
1836
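     /* Return true if any BR/EDR accept list device is not currently
      * connected, in which case page scan must stay enabled so that those
      * devices can (re)connect.
      */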
1837 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
1838 {
1839         struct bdaddr_list *b;
1840
1841         list_for_each_entry(b, &hdev->accept_list, list) {
1842                 struct hci_conn *conn;
1843
1844                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1845                 if (!conn)
1846                         return true;
1847
1848                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1849                         return true;
1850         }
1851
1852         return false;
1853 }
1854
1855 void __hci_req_update_scan(struct hci_request *req)
1856 {
1857         struct hci_dev *hdev = req->hdev;
1858         u8 scan;
1859
1860         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1861                 return;
1862
1863         if (!hdev_is_powered(hdev))
1864                 return;
1865
1866         if (mgmt_powering_down(hdev))
1867                 return;
1868
1869         if (hdev->scanning_paused)
1870                 return;
1871
1872         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
1873             disconnected_accept_list_entries(hdev))
1874                 scan = SCAN_PAGE;
1875         else
1876                 scan = SCAN_DISABLED;
1877
1878         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1879                 scan |= SCAN_INQUIRY;
1880
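             /* Skip the HCI command if the current page scan and inquiry scan
              * state already matches the desired one.
              */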
1881         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1882             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1883                 return;
1884
1885         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1886 }
1887
1888 static int update_scan(struct hci_request *req, unsigned long opt)
1889 {
1890         hci_dev_lock(req->hdev);
1891         __hci_req_update_scan(req);
1892         hci_dev_unlock(req->hdev);
1893         return 0;
1894 }
1895
1896 static void scan_update_work(struct work_struct *work)
1897 {
1898         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1899
1900         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
1901 }
1902
1903 static u8 get_service_classes(struct hci_dev *hdev)
1904 {
1905         struct bt_uuid *uuid;
1906         u8 val = 0;
1907
1908         list_for_each_entry(uuid, &hdev->uuids, list)
1909                 val |= uuid->svc_hint;
1910
1911         return val;
1912 }
1913
1914 void __hci_req_update_class(struct hci_request *req)
1915 {
1916         struct hci_dev *hdev = req->hdev;
1917         u8 cod[3];
1918
1919         bt_dev_dbg(hdev, "");
1920
1921         if (!hdev_is_powered(hdev))
1922                 return;
1923
1924         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1925                 return;
1926
1927         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1928                 return;
1929
1930         cod[0] = hdev->minor_class;
1931         cod[1] = hdev->major_class;
1932         cod[2] = get_service_classes(hdev);
1933
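             /* Service class bit 13 (Limited Discoverable Mode) sits in bit 5
              * of the middle Class of Device octet, hence the 0x20 below.
              */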
1934         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1935                 cod[1] |= 0x20;
1936
1937         if (memcmp(cod, hdev->dev_class, 3) == 0)
1938                 return;
1939
1940         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1941 }
1942
1943 static void write_iac(struct hci_request *req)
1944 {
1945         struct hci_dev *hdev = req->hdev;
1946         struct hci_cp_write_current_iac_lap cp;
1947
1948         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1949                 return;
1950
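             /* The IAC LAPs used below are the assigned values 0x9e8b00
              * (LIAC) and 0x9e8b33 (GIAC), written in little-endian order.
              */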
1951         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1952                 /* Limited discoverable mode */
1953                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1954                 cp.iac_lap[0] = 0x00;   /* LIAC */
1955                 cp.iac_lap[1] = 0x8b;
1956                 cp.iac_lap[2] = 0x9e;
1957                 cp.iac_lap[3] = 0x33;   /* GIAC */
1958                 cp.iac_lap[4] = 0x8b;
1959                 cp.iac_lap[5] = 0x9e;
1960         } else {
1961                 /* General discoverable mode */
1962                 cp.num_iac = 1;
1963                 cp.iac_lap[0] = 0x33;   /* GIAC */
1964                 cp.iac_lap[1] = 0x8b;
1965                 cp.iac_lap[2] = 0x9e;
1966         }
1967
1968         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1969                     (cp.num_iac * 3) + 1, &cp);
1970 }
1971
1972 static int discoverable_update(struct hci_request *req, unsigned long opt)
1973 {
1974         struct hci_dev *hdev = req->hdev;
1975
1976         hci_dev_lock(hdev);
1977
1978         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1979                 write_iac(req);
1980                 __hci_req_update_scan(req);
1981                 __hci_req_update_class(req);
1982         }
1983
1984         /* Advertising instances don't use the global discoverable setting, so
1985          * only update AD if advertising was enabled using Set Advertising.
1986          */
1987         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1988                 __hci_req_update_adv_data(req, 0x00);
1989
1990                 /* Discoverable mode affects the local advertising
1991                  * address in limited privacy mode.
1992                  */
1993                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
1994                         if (ext_adv_capable(hdev))
1995                                 __hci_req_start_ext_adv(req, 0x00);
1996                         else
1997                                 __hci_req_enable_advertising(req);
1998                 }
1999         }
2000
2001         hci_dev_unlock(hdev);
2002
2003         return 0;
2004 }
2005
2006 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2007                       u8 reason)
2008 {
2009         switch (conn->state) {
2010         case BT_CONNECTED:
2011         case BT_CONFIG:
2012                 if (conn->type == AMP_LINK) {
2013                         struct hci_cp_disconn_phy_link cp;
2014
2015                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2016                         cp.reason = reason;
2017                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2018                                     &cp);
2019                 } else {
2020                         struct hci_cp_disconnect dc;
2021
2022                         dc.handle = cpu_to_le16(conn->handle);
2023                         dc.reason = reason;
2024                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2025                 }
2026
2027                 conn->state = BT_DISCONN;
2028
2029                 break;
2030         case BT_CONNECT:
2031                 if (conn->type == LE_LINK) {
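                             /* While the connection is still in the passive
                              * scanning stage no Create Connection has been
                              * issued yet, so there is nothing to cancel.
                              */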
2032                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2033                                 break;
2034                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2035                                     0, NULL);
2036                 } else if (conn->type == ACL_LINK) {
2037                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2038                                 break;
2039                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2040                                     6, &conn->dst);
2041                 }
2042                 break;
2043         case BT_CONNECT2:
2044                 if (conn->type == ACL_LINK) {
2045                         struct hci_cp_reject_conn_req rej;
2046
2047                         bacpy(&rej.bdaddr, &conn->dst);
2048                         rej.reason = reason;
2049
2050                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2051                                     sizeof(rej), &rej);
2052                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2053                         struct hci_cp_reject_sync_conn_req rej;
2054
2055                         bacpy(&rej.bdaddr, &conn->dst);
2056
2057                         /* SCO rejection has its own limited set of
2058                          * allowed error values (0x0D-0x0F) which isn't
2059                          * compatible with most values passed to this
2060                          * function. To be safe hard-code one of the
2061                          * values that's suitable for SCO.
2062                          */
2063                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2064
2065                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2066                                     sizeof(rej), &rej);
2067                 }
2068                 break;
2069         default:
2070                 conn->state = BT_CLOSED;
2071                 break;
2072         }
2073 }
2074
2075 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2076 {
2077         if (status)
2078                 bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
2079 }
2080
2081 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2082 {
2083         struct hci_request req;
2084         int err;
2085
2086         hci_req_init(&req, conn->hdev);
2087
2088         __hci_abort_conn(&req, conn, reason);
2089
2090         err = hci_req_run(&req, abort_conn_complete);
2091         if (err && err != -ENODATA) {
2092                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2093                 return err;
2094         }
2095
2096         return 0;
2097 }
2098
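     /* A minimal usage sketch (hypothetical caller): tearing down a link
      * with a specific reason would typically look like
      *
      *      hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
      *
      * which queues the state-appropriate command from above and runs the
      * request asynchronously, treating an empty request (-ENODATA) as
      * success.
      */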
2099 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2100 {
2101         hci_req_add_le_scan_disable(req, false);
2102         return 0;
2103 }
2104
2105 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2106 {
2107         u8 length = opt;
2108         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2109         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2110         struct hci_cp_inquiry cp;
2111
2112         if (test_bit(HCI_INQUIRY, &req->hdev->flags))
2113                 return 0;
2114
2115         bt_dev_dbg(req->hdev, "");
2116
2117         hci_dev_lock(req->hdev);
2118         hci_inquiry_cache_flush(req->hdev);
2119         hci_dev_unlock(req->hdev);
2120
2121         memset(&cp, 0, sizeof(cp));
2122
2123         if (req->hdev->discovery.limited)
2124                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2125         else
2126                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2127
2128         cp.length = length;
2129
2130         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2131
2132         return 0;
2133 }
2134
2135 static void le_scan_disable_work(struct work_struct *work)
2136 {
2137         struct hci_dev *hdev = container_of(work, struct hci_dev,
2138                                             le_scan_disable.work);
2139         u8 status;
2140
2141         bt_dev_dbg(hdev, "");
2142
2143         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2144                 return;
2145
2146         cancel_delayed_work(&hdev->le_scan_restart);
2147
2148         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2149         if (status) {
2150                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2151                            status);
2152                 return;
2153         }
2154
2155         hdev->discovery.scan_start = 0;
2156
2157         /* If we were running an LE-only scan, change discovery state. If
2158          * we were running both LE and BR/EDR inquiry simultaneously,
2159          * and BR/EDR inquiry is already finished, stop discovery,
2160          * otherwise BR/EDR inquiry will stop discovery when finished.
2161          * If we are resolving a remote device name, do not change
2162          * the discovery state.
2163          */
2164
2165         if (hdev->discovery.type == DISCOV_TYPE_LE)
2166                 goto discov_stopped;
2167
2168         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2169                 return;
2170
2171         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2172                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2173                     hdev->discovery.state != DISCOVERY_RESOLVING)
2174                         goto discov_stopped;
2175
2176                 return;
2177         }
2178
2179         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2180                      HCI_CMD_TIMEOUT, &status);
2181         if (status) {
2182                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2183                 goto discov_stopped;
2184         }
2185
2186         return;
2187
2188 discov_stopped:
2189         hci_dev_lock(hdev);
2190         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2191         hci_dev_unlock(hdev);
2192 }
2193
2194 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2195 {
2196         struct hci_dev *hdev = req->hdev;
2197
2198         /* If controller is not scanning we are done. */
2199         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2200                 return 0;
2201
2202         if (hdev->scanning_paused) {
2203                 bt_dev_dbg(hdev, "Scanning is paused for suspend");
2204                 return 0;
2205         }
2206
2207         hci_req_add_le_scan_disable(req, false);
2208
2209         if (use_ext_scan(hdev)) {
2210                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2211
2212                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2213                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2214                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2215
2216                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2217                             sizeof(ext_enable_cp), &ext_enable_cp);
2218         } else {
2219                 struct hci_cp_le_set_scan_enable cp;
2220
2221                 memset(&cp, 0, sizeof(cp));
2222                 cp.enable = LE_SCAN_ENABLE;
2223                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2224                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2225         }
2226
2227         return 0;
2228 }
2229
2230 static void le_scan_restart_work(struct work_struct *work)
2231 {
2232         struct hci_dev *hdev = container_of(work, struct hci_dev,
2233                                             le_scan_restart.work);
2234         unsigned long timeout, duration, scan_start, now;
2235         u8 status;
2236
2237         bt_dev_dbg(hdev, "");
2238
2239         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2240         if (status) {
2241                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2242                            status);
2243                 return;
2244         }
2245
2246         hci_dev_lock(hdev);
2247
2248         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2249             !hdev->discovery.scan_start)
2250                 goto unlock;
2251
2252         /* When the scan was started, hdev->le_scan_disable was queued to
2253          * run 'duration' after scan_start. During the scan restart that
2254          * work was canceled, so queue it again with the proper remaining
2255          * timeout to make sure the scan does not run indefinitely.
2256          */
2257         duration = hdev->discovery.scan_duration;
2258         scan_start = hdev->discovery.scan_start;
2259         now = jiffies;
2260         if (now - scan_start <= duration) {
2261                 int elapsed;
2262
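                     /* Guard against jiffies wraparound when computing the
                      * elapsed scan time.
                      */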
2263                 if (now >= scan_start)
2264                         elapsed = now - scan_start;
2265                 else
2266                         elapsed = ULONG_MAX - scan_start + now;
2267
2268                 timeout = duration - elapsed;
2269         } else {
2270                 timeout = 0;
2271         }
2272
2273         queue_delayed_work(hdev->req_workqueue,
2274                            &hdev->le_scan_disable, timeout);
2275
2276 unlock:
2277         hci_dev_unlock(hdev);
2278 }
2279
2280 static int active_scan(struct hci_request *req, unsigned long opt)
2281 {
2282         uint16_t interval = opt;
2283         struct hci_dev *hdev = req->hdev;
2284         u8 own_addr_type;
2285         /* Accept list is not used for discovery */
2286         u8 filter_policy = 0x00;
2287         /* Default is to enable duplicates filter */
2288         u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2289         /* Discovery doesn't require controller address resolution */
2290         bool addr_resolv = false;
2291         int err;
2292
2293         bt_dev_dbg(hdev, "");
2294
2295         /* If controller is scanning, it means the background scanning is
2296          * running. Thus, we should temporarily stop it in order to set the
2297          * discovery scanning parameters.
2298          */
2299         if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2300                 hci_req_add_le_scan_disable(req, false);
2301                 cancel_interleave_scan(hdev);
2302         }
2303
2304         /* All active scans will be done with either a resolvable private
2305          * address (when privacy feature has been enabled) or non-resolvable
2306          * private address.
2307          */
2308         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2309                                         &own_addr_type);
2310         if (err < 0)
2311                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2312
2313         if (hci_is_adv_monitoring(hdev)) {
2314                 /* Duplicate filter should be disabled when some advertisement
2315                  * monitor is activated, otherwise AdvMon can only receive one
2316                  * advertisement for one peer(*) during active scanning, and
2317                  * might report loss to these peers.
2318                  *
2319                  * Note that different controllers have different meanings of
2320                  * |duplicate|. Some of them consider packets with the same
2321                  * address as duplicate, and others consider packets with the
2322                  * same address and the same RSSI as duplicate. In the
2323                  * latter case we would not strictly need to disable the
2324                  * duplicate filter, but since active scanning typically
2325                  * runs only for a short period, the power impact is negligible.
2326                  */
2327                 filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2328         }
2329
2330         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
2331                            hdev->le_scan_window_discovery, own_addr_type,
2332                            filter_policy, filter_dup, addr_resolv);
2333         return 0;
2334 }
2335
2336 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2337 {
2338         int err;
2339
2340         bt_dev_dbg(req->hdev, "");
2341
2342         err = active_scan(req, opt);
2343         if (err)
2344                 return err;
2345
2346         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2347 }
2348
2349 static void start_discovery(struct hci_dev *hdev, u8 *status)
2350 {
2351         unsigned long timeout;
2352
2353         bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
2354
2355         switch (hdev->discovery.type) {
2356         case DISCOV_TYPE_BREDR:
2357                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2358                         hci_req_sync(hdev, bredr_inquiry,
2359                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2360                                      status);
2361                 return;
2362         case DISCOV_TYPE_INTERLEAVED:
2363                 /* When running simultaneous discovery, the LE scanning time
2364                  * should occupy the whole discovery time since BR/EDR inquiry
2365                  * and LE scanning are scheduled by the controller.
2366                  *
2367                  * For interleaving discovery in comparison, BR/EDR inquiry
2368                  * and LE scanning are done sequentially with separate
2369                  * timeouts.
2370                  */
2371                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2372                              &hdev->quirks)) {
2373                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2374                         /* During simultaneous discovery, we double the LE scan
2375                          * interval. We must leave some time for the controller
2376                          * to do BR/EDR inquiry.
2377                          */
2378                         hci_req_sync(hdev, interleaved_discov,
2379                                      hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT,
2380                                      status);
2381                         break;
2382                 }
2383
2384                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2385                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2386                              HCI_CMD_TIMEOUT, status);
2387                 break;
2388         case DISCOV_TYPE_LE:
2389                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2390                 hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery,
2391                              HCI_CMD_TIMEOUT, status);
2392                 break;
2393         default:
2394                 *status = HCI_ERROR_UNSPECIFIED;
2395                 return;
2396         }
2397
2398         if (*status)
2399                 return;
2400
2401         bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
2402
2403         /* When service discovery is used and the controller has a
2404          * strict duplicate filter, it is important to remember the
2405          * start and duration of the scan. This is required for
2406          * restarting scanning during the discovery phase.
2407          */
2408         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2409                      hdev->discovery.result_filtering) {
2410                 hdev->discovery.scan_start = jiffies;
2411                 hdev->discovery.scan_duration = timeout;
2412         }
2413
2414         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2415                            timeout);
2416 }
2417
2418 bool hci_req_stop_discovery(struct hci_request *req)
2419 {
2420         struct hci_dev *hdev = req->hdev;
2421         struct discovery_state *d = &hdev->discovery;
2422         struct hci_cp_remote_name_req_cancel cp;
2423         struct inquiry_entry *e;
2424         bool ret = false;
2425
2426         bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
2427
2428         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2429                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2430                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2431
2432                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2433                         cancel_delayed_work(&hdev->le_scan_disable);
2434                         cancel_delayed_work(&hdev->le_scan_restart);
2435                         hci_req_add_le_scan_disable(req, false);
2436                 }
2437
2438                 ret = true;
2439         } else {
2440                 /* Passive scanning */
2441                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2442                         hci_req_add_le_scan_disable(req, false);
2443                         ret = true;
2444                 }
2445         }
2446
2447         /* No further actions needed for LE-only discovery */
2448         if (d->type == DISCOV_TYPE_LE)
2449                 return ret;
2450
2451         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2452                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2453                                                      NAME_PENDING);
2454                 if (!e)
2455                         return ret;
2456
2457                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2458                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2459                             &cp);
2460                 ret = true;
2461         }
2462
2463         return ret;
2464 }
2465
2466 static void config_data_path_complete(struct hci_dev *hdev, u8 status,
2467                                       u16 opcode)
2468 {
2469         bt_dev_dbg(hdev, "status %u", status);
2470 }
2471
2472 int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec)
2473 {
2474         struct hci_request req;
2475         int err;
2476         __u8 vnd_len, *vnd_data = NULL;
2477         struct hci_op_configure_data_path *cmd = NULL;
2478
2479         hci_req_init(&req, hdev);
2480
2481         err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
2482                                           &vnd_data);
2483         if (err < 0)
2484                 goto error;
2485
2486         cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
2487         if (!cmd) {
2488                 err = -ENOMEM;
2489                 goto error;
2490         }
2491
2492         err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
2493         if (err < 0)
2494                 goto error;
2495
2496         cmd->vnd_len = vnd_len;
2497         memcpy(cmd->vnd_data, vnd_data, vnd_len);
2498
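             /* Configure the path in both directions: 0x00 for input (host
              * to controller) and 0x01 for output (controller to host).
              */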
2499         cmd->direction = 0x00;
2500         hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2501
2502         cmd->direction = 0x01;
2503         hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd);
2504
2505         err = hci_req_run(&req, config_data_path_complete);
2506 error:
2507
2508         kfree(cmd);
2509         kfree(vnd_data);
2510         return err;
2511 }
2512
2513 static int stop_discovery(struct hci_request *req, unsigned long opt)
2514 {
2515         hci_dev_lock(req->hdev);
2516         hci_req_stop_discovery(req);
2517         hci_dev_unlock(req->hdev);
2518
2519         return 0;
2520 }
2521
2522 static void discov_update(struct work_struct *work)
2523 {
2524         struct hci_dev *hdev = container_of(work, struct hci_dev,
2525                                             discov_update);
2526         u8 status = 0;
2527
2528         switch (hdev->discovery.state) {
2529         case DISCOVERY_STARTING:
2530                 start_discovery(hdev, &status);
2531                 mgmt_start_discovery_complete(hdev, status);
2532                 if (status)
2533                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2534                 else
2535                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2536                 break;
2537         case DISCOVERY_STOPPING:
2538                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2539                 mgmt_stop_discovery_complete(hdev, status);
2540                 if (!status)
2541                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2542                 break;
2543         case DISCOVERY_STOPPED:
2544         default:
2545                 return;
2546         }
2547 }
2548
2549 static void discov_off(struct work_struct *work)
2550 {
2551         struct hci_dev *hdev = container_of(work, struct hci_dev,
2552                                             discov_off.work);
2553
2554         bt_dev_dbg(hdev, "");
2555
2556         hci_dev_lock(hdev);
2557
2558         /* When discoverable timeout triggers, then just make sure
2559          * the limited discoverable flag is cleared. Even in the case
2560          * of a timeout triggered from general discoverable, it is
2561          * safe to unconditionally clear the flag.
2562          */
2563         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2564         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2565         hdev->discov_timeout = 0;
2566
2567         hci_dev_unlock(hdev);
2568
2569         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2570         mgmt_new_settings(hdev);
2571 }
2572
2573 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2574 {
2575         struct hci_dev *hdev = req->hdev;
2576         u8 link_sec;
2577
2578         hci_dev_lock(hdev);
2579
2580         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2581             !lmp_host_ssp_capable(hdev)) {
2582                 u8 mode = 0x01;
2583
2584                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2585
2586                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2587                         u8 support = 0x01;
2588
2589                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2590                                     sizeof(support), &support);
2591                 }
2592         }
2593
2594         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2595             lmp_bredr_capable(hdev)) {
2596                 struct hci_cp_write_le_host_supported cp;
2597
2598                 cp.le = 0x01;
2599                 cp.simul = 0x00;
2600
2601                 /* Check first if we already have the right
2602                  * host state (host features set)
2603                  */
2604                 if (cp.le != lmp_host_le_capable(hdev) ||
2605                     cp.simul != lmp_host_le_br_capable(hdev))
2606                         hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2607                                     sizeof(cp), &cp);
2608         }
2609
2610         if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2611                 /* Make sure the controller has a good default for
2612                  * advertising data. This also applies to the case
2613                  * where BR/EDR was toggled during the AUTO_OFF phase.
2614                  */
2615                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2616                     list_empty(&hdev->adv_instances)) {
2617                         int err;
2618
2619                         if (ext_adv_capable(hdev)) {
2620                                 err = __hci_req_setup_ext_adv_instance(req,
2621                                                                        0x00);
2622                                 if (!err)
2623                                         __hci_req_update_scan_rsp_data(req,
2624                                                                        0x00);
2625                         } else {
2626                                 err = 0;
2627                                 __hci_req_update_adv_data(req, 0x00);
2628                                 __hci_req_update_scan_rsp_data(req, 0x00);
2629                         }
2630
2631                         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2632                                 if (!ext_adv_capable(hdev))
2633                                         __hci_req_enable_advertising(req);
2634                                 else if (!err)
2635                                         __hci_req_enable_ext_advertising(req,
2636                                                                          0x00);
2637                         }
2638                 } else if (!list_empty(&hdev->adv_instances)) {
2639                         struct adv_info *adv_instance;
2640
2641                         adv_instance = list_first_entry(&hdev->adv_instances,
2642                                                         struct adv_info, list);
2643                         __hci_req_schedule_adv_instance(req,
2644                                                         adv_instance->instance,
2645                                                         true);
2646                 }
2647         }
2648
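             /* Bring the controller's Authentication Enable setting in line
              * with the current link security flag.
              */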
2649         link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2650         if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2651                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2652                             sizeof(link_sec), &link_sec);
2653
2654         if (lmp_bredr_capable(hdev)) {
2655                 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2656                         __hci_req_write_fast_connectable(req, true);
2657                 else
2658                         __hci_req_write_fast_connectable(req, false);
2659                 __hci_req_update_scan(req);
2660                 __hci_req_update_class(req);
2661                 __hci_req_update_name(req);
2662                 __hci_req_update_eir(req);
2663         }
2664
2665         hci_dev_unlock(hdev);
2666         return 0;
2667 }
2668
2669 int __hci_req_hci_power_on(struct hci_dev *hdev)
2670 {
2671         /* Register the available SMP channels (BR/EDR and LE) only when
2672          * successfully powering on the controller. This late
2673          * registration is required so that LE SMP can clearly decide if
2674          * the public address or static address is used.
2675          */
2676         smp_register(hdev);
2677
2678         return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2679                               NULL);
2680 }
2681
2682 void hci_request_setup(struct hci_dev *hdev)
2683 {
2684         INIT_WORK(&hdev->discov_update, discov_update);
2685         INIT_WORK(&hdev->scan_update, scan_update_work);
2686         INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2687         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2688         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2689         INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2690         INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
2691 }
2692
2693 void hci_request_cancel_all(struct hci_dev *hdev)
2694 {
2695         hci_cmd_sync_cancel(hdev, ENODEV);
2696
2697         cancel_work_sync(&hdev->discov_update);
2698         cancel_work_sync(&hdev->scan_update);
2699         cancel_delayed_work_sync(&hdev->discov_off);
2700         cancel_delayed_work_sync(&hdev->le_scan_disable);
2701         cancel_delayed_work_sync(&hdev->le_scan_restart);
2702
2703         if (hdev->adv_instance_timeout) {
2704                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2705                 hdev->adv_instance_timeout = 0;
2706         }
2707
2708         cancel_interleave_scan(hdev);
2709 }