/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define LE_SUSPEND_SCAN_WINDOW          0x0012
#define LE_SUSPEND_SCAN_INTERVAL        0x0060

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

bool hci_req_status_pend(struct hci_dev *hdev)
{
        return hdev->req_status == HCI_REQ_PEND;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}
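
/* Illustrative sketch (not part of the original file): the typical caller
 * pattern for the asynchronous request helpers above. The opcode choice
 * and the callback body here are examples only.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);
}

static int example_run_async_request(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);

        /* Queue one or more commands; allocation failures are latched in
         * req.err and checked by req_run() before anything is sent.
         */
        hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Splice the queued commands onto hdev->cmd_q and kick cmd_work;
         * example_req_complete() runs when the last command completes.
         */
        return hci_req_run(&req, example_req_complete);
}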

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
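
/* Illustrative sketch (not part of the original file): issuing a command
 * synchronously and consuming the returned event skb. This assumes it is
 * called from a context where __hci_cmd_sync() may sleep (e.g. a
 * hci_req_sync() style path).
 */
static int example_read_local_version(struct hci_dev *hdev)
{
        struct hci_rp_read_local_version *rp;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        if (skb->len < sizeof(*rp)) {
                kfree_skb(skb);
                return -EPROTO;
        }

        rp = (void *)skb->data;
        BT_DBG("%s hci ver %u rev %u", hdev->name, rp->hci_ver,
               __le16_to_cpu(rp->hci_rev));

        /* The caller owns the skb returned by __hci_cmd_sync() */
        kfree_skb(skb);
        return 0;
}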

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                        hdev->req_status != HCI_REQ_PEND, timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}
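
/* Illustrative sketch (not part of the original file): the builder
 * callback shape expected by hci_req_sync(). The callback only queues
 * commands; __hci_req_sync() runs them and waits for completion, so a
 * call such as
 *
 *      hci_req_sync(hdev, example_build_scan_enable, SCAN_PAGE,
 *                   HCI_CMD_TIMEOUT, NULL);
 *
 * would write the scan enable setting passed through 'opt'.
 */
static int example_build_scan_enable(struct hci_request *req,
                                     unsigned long opt)
{
        u8 scan = opt;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}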

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
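
/* Worked example (not part of the original file): page scan interval and
 * window values are expressed in baseband slots of 0.625 ms, so:
 *
 *      0x0100 * 0.625 ms = 160 ms   (fast connectable interval)
 *      0x0800 * 0.625 ms = 1280 ms  (default 1.28 sec interval)
 *      0x0012 * 0.625 ms = 11.25 ms (window)
 *
 * A hypothetical helper for the conversion could look like this:
 */
static inline u16 example_ms_to_slots(u32 ms)
{
        /* 1 slot = 0.625 ms = 625 us */
        return ms * 1000 / 625;
}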

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
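
/* Illustrative sketch (not part of the original file): the EIR buffer
 * built by create_eir() is a sequence of [length][type][data...] fields,
 * where length covers the type byte plus the data. A minimal walk over
 * such a buffer looks like this:
 */
static void example_walk_eir(const u8 *eir, size_t eir_len)
{
        size_t offset = 0;

        while (offset < eir_len) {
                u8 field_len = eir[offset];

                /* A zero length field marks the end of significant data */
                if (field_len == 0 || offset + field_len >= eir_len)
                        break;

                BT_DBG("EIR field type 0x%2.2x len %d", eir[offset + 1],
                       field_len - 1);

                offset += field_len + 1;
        }
}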

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }
}

static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
                                u8 bdaddr_type)
{
        struct hci_cp_le_del_from_white_list cp;

        cp.bdaddr_type = bdaddr_type;
        bacpy(&cp.bdaddr, bdaddr);

        bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
}

/* Adds connection to white list if needed. On error, returns -1. */
static int add_to_white_list(struct hci_request *req,
                             struct hci_conn_params *params, u8 *num_entries,
                             bool allow_rpa)
{
        struct hci_cp_le_add_to_white_list cp;
        struct hci_dev *hdev = req->hdev;

        /* Already in white list */
        if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
                                   params->addr_type))
                return 0;

        /* Select filter policy to accept all advertising */
        if (*num_entries >= hdev->le_white_list_size)
                return -1;

        /* White list can not be used with RPAs */
        if (!allow_rpa &&
            hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
                return -1;
        }

        /* During suspend, only wakeable devices can be in whitelist */
        if (hdev->suspended && !params->wakeable)
                return 0;

        *num_entries += 1;
        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
                   cp.bdaddr_type);
        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);

        return 0;
}
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        u8 num_entries = 0;
        bool pend_conn, pend_report;
        /* We allow whitelisting even with RPAs in suspend. In the worst case,
         * we won't be able to wake from devices that use the Privacy 1.2
         * features. Additionally, once we support Privacy 1.2 and IRK
         * offloading, we can update this to also check for those conditions.
         */
        bool allow_rpa = hdev->suspended;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                                      &b->bdaddr,
                                                      b->bdaddr_type);
                pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                                        &b->bdaddr,
                                                        b->bdaddr_type);

                /* If the device is not likely to connect or report,
                 * remove it from the whitelist.
                 */
                if (!pend_conn && !pend_report) {
                        del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
                        continue;
                }

                /* White list can not be used with RPAs */
                if (!allow_rpa &&
                    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        return 0x00;
                }

                num_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space. Abort if space runs out.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (add_to_white_list(req, params, &num_entries, allow_rpa))
                        return 0x00;
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy)
{
        struct hci_dev *hdev = req->hdev;

        /* Use extended scanning if the controller supports the LE Set
         * Extended Scan Parameters and LE Set Extended Scan Enable
         * commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;
        u16 window, interval;

        if (hdev->scanning_paused) {
                bt_dev_dbg(hdev, "Scanning is paused for suspend");
                return;
        }

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        if (hdev->suspended) {
                window = LE_SUSPEND_SCAN_WINDOW;
                interval = LE_SUSPEND_SCAN_INTERVAL;
        } else {
                window = hdev->le_scan_window;
                interval = hdev->le_scan_interval;
        }

        bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
        hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
                           own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return 1;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

static void hci_req_clear_event_filter(struct hci_request *req)
{
        struct hci_cp_set_event_filter f;

        memset(&f, 0, sizeof(f));
        f.flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);

        /* Update page scan state (since we may have modified it when setting
         * the event filter).
         */
        __hci_req_update_scan(req);
}

static void hci_req_set_event_filter(struct hci_request *req)
{
        struct bdaddr_list *b;
        struct hci_cp_set_event_filter f;
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        /* Always clear event filter when starting */
        hci_req_clear_event_filter(req);

        list_for_each_entry(b, &hdev->wakeable, list) {
                memset(&f, 0, sizeof(f));
                bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
                f.flt_type = HCI_FLT_CONN_SETUP;
                f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
                f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;

                bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
                hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
        }

        scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
        /* Can't change params without disabling first */
        hci_req_add_le_scan_disable(req);

        /* Configure params and enable scanning */
        hci_req_add_le_passive_scan(req);

        /* Block suspend notifier on response */
        set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}

static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
        if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
            test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
                wake_up(&hdev->suspend_wait_q);
        }
}

/* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{
        struct hci_conn *conn;
        struct hci_request req;
        u8 page_scan;
        int disconnect_counter;

        if (next == hdev->suspend_state) {
                bt_dev_dbg(hdev, "Same state before and after: %d", next);
                goto done;
        }

        hdev->suspend_state = next;
        hci_req_init(&req, hdev);

        if (next == BT_SUSPEND_DISCONNECT) {
                /* Mark device as suspended */
                hdev->suspended = true;

                /* Disable page scan */
                page_scan = SCAN_DISABLED;
                hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);

                /* Disable LE passive scan */
                hci_req_add_le_scan_disable(&req);

                /* Mark task needing completion */
                set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);

                /* Prevent disconnects from causing scanning to be re-enabled */
                hdev->scanning_paused = true;

                /* Run commands before disconnecting */
                hci_req_run(&req, suspend_req_complete);

                disconnect_counter = 0;
                /* Soft disconnect everything (power off) */
                list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                        hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
                        disconnect_counter++;
                }

                if (disconnect_counter > 0) {
                        bt_dev_dbg(hdev,
                                   "Had %d disconnects. Will wait on them",
                                   disconnect_counter);
                        set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
                }
        } else if (next == BT_SUSPEND_COMPLETE) {
                /* Unpause to take care of updating scanning params */
                hdev->scanning_paused = false;
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
                hci_req_config_le_suspend_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
        } else {
                hdev->suspended = false;
                hdev->scanning_paused = false;

                hci_req_clear_event_filter(&req);
                /* Reset passive/background scanning to normal */
                hci_req_config_le_suspend_scan(&req);
                hci_req_run(&req, suspend_req_complete);
        }

        hdev->suspend_state = next;

done:
        clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
        wake_up(&hdev->suspend_wait_q);
}
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)
                return 1;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                struct hci_cp_le_set_ext_adv_enable cp;

                cp.enable = 0x00;
                /* Disable all sets since we only support one set at the moment */
                cp.num_of_sets = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}
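
/* Worked example (not part of the original file): le_states is a 64-bit
 * little-endian bit field stored as a byte array, so a supported-state
 * bit N is tested as le_states[N / 8] & (1 << (N % 8)). For two of the
 * slave-role checks above:
 *
 *      bit 20: le_states[20 / 8] & (1 << (20 % 8)) == le_states[2] & 0x10
 *      bit 38: le_states[38 / 8] & (1 << (38 % 8)) == le_states[4] & 0x40
 */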

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u16 adv_min_interval, adv_max_interval;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));

        if (connectable) {
                cp.type = LE_ADV_IND;

                adv_min_interval = hdev->le_adv_min_interval;
                adv_max_interval = hdev->le_adv_max_interval;
        } else {
                if (get_cur_adv_instance_scan_rsp_len(hdev))
                        cp.type = LE_ADV_SCAN_IND;
                else
                        cp.type = LE_ADV_NONCONN_IND;

                if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
                    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
                        adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
                        adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
                } else {
                        adv_min_interval = hdev->le_adv_min_interval;
                        adv_max_interval = hdev->le_adv_max_interval;
                }
        }

        cp.min_interval = cpu_to_le16(adv_min_interval);
        cp.max_interval = cpu_to_le16(adv_max_interval);
        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
        }

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.handle = 0;
                cp.length = len;
                cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
        struct adv_info *adv_instance = NULL;
        u8 ad_len = 0, flags = 0;
        u32 instance_flags;

        /* Return 0 when the current instance identifier is invalid. */
        if (instance) {
                adv_instance = hci_find_adv_instance(hdev, instance);
                if (!adv_instance)
                        return 0;
        }

        instance_flags = get_adv_instance_flags(hdev, instance);

        /* If instance already has the flags set skip adding it once
         * again.
         */
        if (adv_instance && eir_get_data(adv_instance->adv_data,
                                         adv_instance->adv_data_len, EIR_FLAGS,
                                         NULL))
                goto skip_flags;

        /* The Add Advertising command allows userspace to set both the general
         * and limited discoverable flags.
         */
        if (instance_flags & MGMT_ADV_FLAG_DISCOV)
                flags |= LE_AD_GENERAL;

        if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
                flags |= LE_AD_LIMITED;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                flags |= LE_AD_NO_BREDR;

        if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
                /* If a discovery flag wasn't provided, simply use the global
                 * settings.
                 */
                if (!flags)
                        flags |= mgmt_get_adv_discov_flags(hdev);

                /* If flags would still be empty, then there is no need to
                 * include the "Flags" AD field.
                 */
                if (flags) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_FLAGS;
                        ptr[2] = flags;

                        ad_len += 3;
                        ptr += 3;
                }
        }

skip_flags:
        if (adv_instance) {
                memcpy(ptr, adv_instance->adv_data,
                       adv_instance->adv_data_len);
                ad_len += adv_instance->adv_data_len;
                ptr += adv_instance->adv_data_len;
        }

        if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
                s8 adv_tx_power;

                if (ext_adv_capable(hdev)) {
                        if (adv_instance)
                                adv_tx_power = adv_instance->tx_power;
                        else
                                adv_tx_power = hdev->adv_tx_power;
                } else {
                        adv_tx_power = hdev->adv_tx_power;
                }

                /* Provide Tx Power only if we can provide a valid value for it */
                if (adv_tx_power != HCI_TX_POWER_INVALID) {
                        ptr[0] = 0x02;
                        ptr[1] = EIR_TX_POWER;
                        ptr[2] = (u8)adv_tx_power;

                        ad_len += 3;
                        ptr += 3;
                }
        }

        return ad_len;
}
1525
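/* Worked example of the AD layout built above, assuming flags resolve
 * to 0x06 (LE General Discoverable | BR/EDR Not Supported) and a TX
 * power of 0 dBm:
 *
 *	02 01 06          length 2, EIR_FLAGS, flags value
 *	<instance data>   copied verbatim from adv_instance->adv_data
 *	02 0a 00          length 2, EIR_TX_POWER, 0 dBm
 *
 * Each field is length-prefixed, and the length octet covers the type
 * octet plus the payload, which is why ad_len grows by 3 per field.
 */
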
1526 void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
1527 {
1528         struct hci_dev *hdev = req->hdev;
1529         u8 len;
1530
1531         if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1532                 return;
1533
1534         if (ext_adv_capable(hdev)) {
1535                 struct hci_cp_le_set_ext_adv_data cp;
1536
1537                 memset(&cp, 0, sizeof(cp));
1538
1539                 len = create_instance_adv_data(hdev, instance, cp.data);
1540
1541                 /* There's nothing to do if the data hasn't changed */
1542                 if (hdev->adv_data_len == len &&
1543                     memcmp(cp.data, hdev->adv_data, len) == 0)
1544                         return;
1545
1546                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1547                 hdev->adv_data_len = len;
1548
1549                 cp.length = len;
1550                 cp.handle = 0;
1551                 cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
1552                 cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1553
1554                 hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
1555         } else {
1556                 struct hci_cp_le_set_adv_data cp;
1557
1558                 memset(&cp, 0, sizeof(cp));
1559
1560                 len = create_instance_adv_data(hdev, instance, cp.data);
1561
1562                 /* There's nothing to do if the data hasn't changed */
1563                 if (hdev->adv_data_len == len &&
1564                     memcmp(cp.data, hdev->adv_data, len) == 0)
1565                         return;
1566
1567                 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1568                 hdev->adv_data_len = len;
1569
1570                 cp.length = len;
1571
1572                 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1573         }
1574 }
1575
1576 int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
1577 {
1578         struct hci_request req;
1579
1580         hci_req_init(&req, hdev);
1581         __hci_req_update_adv_data(&req, instance);
1582
1583         return hci_req_run(&req, NULL);
1584 }
1585
1586 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1587 {
1588         BT_DBG("%s status %u", hdev->name, status);
1589 }
1590
1591 void hci_req_reenable_advertising(struct hci_dev *hdev)
1592 {
1593         struct hci_request req;
1594
1595         if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1596             list_empty(&hdev->adv_instances))
1597                 return;
1598
1599         hci_req_init(&req, hdev);
1600
1601         if (hdev->cur_adv_instance) {
1602                 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1603                                                 true);
1604         } else {
1605                 if (ext_adv_capable(hdev)) {
1606                         __hci_req_start_ext_adv(&req, 0x00);
1607                 } else {
1608                         __hci_req_update_adv_data(&req, 0x00);
1609                         __hci_req_update_scan_rsp_data(&req, 0x00);
1610                         __hci_req_enable_advertising(&req);
1611                 }
1612         }
1613
1614         hci_req_run(&req, adv_enable_complete);
1615 }
1616
1617 static void adv_timeout_expire(struct work_struct *work)
1618 {
1619         struct hci_dev *hdev = container_of(work, struct hci_dev,
1620                                             adv_instance_expire.work);
1621
1622         struct hci_request req;
1623         u8 instance;
1624
1625         BT_DBG("%s", hdev->name);
1626
1627         hci_dev_lock(hdev);
1628
1629         hdev->adv_instance_timeout = 0;
1630
1631         instance = hdev->cur_adv_instance;
1632         if (instance == 0x00)
1633                 goto unlock;
1634
1635         hci_req_init(&req, hdev);
1636
1637         hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);
1638
1639         if (list_empty(&hdev->adv_instances))
1640                 __hci_req_disable_advertising(&req);
1641
1642         hci_req_run(&req, NULL);
1643
1644 unlock:
1645         hci_dev_unlock(hdev);
1646 }
1647
1648 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
1649                            bool use_rpa, struct adv_info *adv_instance,
1650                            u8 *own_addr_type, bdaddr_t *rand_addr)
1651 {
1652         int err;
1653
1654         bacpy(rand_addr, BDADDR_ANY);
1655
1656         /* If privacy is enabled use a resolvable private address. If
1657          * the current RPA has expired, generate a new one.
1658          */
1659         if (use_rpa) {
1660                 int to;
1661
1662                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1663
1664                 if (adv_instance) {
1665                         if (!adv_instance->rpa_expired &&
1666                             !bacmp(&adv_instance->random_addr, &hdev->rpa))
1667                                 return 0;
1668
1669                         adv_instance->rpa_expired = false;
1670                 } else {
1671                         if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
1672                             !bacmp(&hdev->random_addr, &hdev->rpa))
1673                                 return 0;
1674                 }
1675
1676                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1677                 if (err < 0) {
1678                         bt_dev_err(hdev, "failed to generate new RPA");
1679                         return err;
1680                 }
1681
1682                 bacpy(rand_addr, &hdev->rpa);
1683
1684                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1685                 if (adv_instance)
1686                         queue_delayed_work(hdev->workqueue,
1687                                            &adv_instance->rpa_expired_cb, to);
1688                 else
1689                         queue_delayed_work(hdev->workqueue,
1690                                            &hdev->rpa_expired, to);
1691
1692                 return 0;
1693         }
1694
1695         /* In case of required privacy without resolvable private address,
1696          * use a non-resolvable private address. This is useful for
1697          * non-connectable advertising.
1698          */
1699         if (require_privacy) {
1700                 bdaddr_t nrpa;
1701
1702                 while (true) {
1703                         /* The non-resolvable private address is generated
1704                          * from six random bytes with the two most significant
1705                          * bits cleared.
1706                          */
1707                         get_random_bytes(&nrpa, 6);
1708                         nrpa.b[5] &= 0x3f;
1709
1710                         /* The non-resolvable private address shall not be
1711                          * equal to the public address.
1712                          */
1713                         if (bacmp(&hdev->bdaddr, &nrpa))
1714                                 break;
1715                 }
1716
1717                 *own_addr_type = ADDR_LE_DEV_RANDOM;
1718                 bacpy(rand_addr, &nrpa);
1719
1720                 return 0;
1721         }
1722
1723         /* No privacy so use a public address. */
1724         *own_addr_type = ADDR_LE_DEV_PUBLIC;
1725
1726         return 0;
1727 }
1728
1729 void __hci_req_clear_ext_adv_sets(struct hci_request *req)
1730 {
1731         hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
1732 }
1733
1734 int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
1735 {
1736         struct hci_cp_le_set_ext_adv_params cp;
1737         struct hci_dev *hdev = req->hdev;
1738         bool connectable;
1739         u32 flags;
1740         bdaddr_t random_addr;
1741         u8 own_addr_type;
1742         int err;
1743         struct adv_info *adv_instance;
1744         bool secondary_adv;
1745         /* In ext adv params the interval is 3 octets; 0x000800 * 0.625 ms = 1.28 s */
1746         const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };
1747
1748         if (instance > 0) {
1749                 adv_instance = hci_find_adv_instance(hdev, instance);
1750                 if (!adv_instance)
1751                         return -EINVAL;
1752         } else {
1753                 adv_instance = NULL;
1754         }
1755
1756         flags = get_adv_instance_flags(hdev, instance);
1757
1758         /* If the "connectable" instance flag was not set, then choose between
1759          * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1760          */
1761         connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1762                       mgmt_get_connectable(hdev);
1763
1764         if (!is_advertising_allowed(hdev, connectable))
1765                 return -EPERM;
1766
1767         /* Set require_privacy to true only when non-connectable
1768          * advertising is used. In that case it is fine to use a
1769          * non-resolvable private address.
1770          */
1771         err = hci_get_random_address(hdev, !connectable,
1772                                      adv_use_rpa(hdev, flags), adv_instance,
1773                                      &own_addr_type, &random_addr);
1774         if (err < 0)
1775                 return err;
1776
1777         memset(&cp, 0, sizeof(cp));
1778
1779         memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
1780         memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));
1781
1782         secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1783
1784         if (connectable) {
1785                 if (secondary_adv)
1786                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1787                 else
1788                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1789         } else if (get_adv_instance_scan_rsp_len(hdev, instance)) {
1790                 if (secondary_adv)
1791                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1792                 else
1793                         cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1794         } else {
1795                 if (secondary_adv)
1796                         cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1797                 else
1798                         cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1799         }
1800
1801         cp.own_addr_type = own_addr_type;
1802         cp.channel_map = hdev->le_adv_channel_map;
1803         cp.tx_power = 127;
1804         cp.handle = instance;
1805
1806         if (flags & MGMT_ADV_FLAG_SEC_2M) {
1807                 cp.primary_phy = HCI_ADV_PHY_1M;
1808                 cp.secondary_phy = HCI_ADV_PHY_2M;
1809         } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1810                 cp.primary_phy = HCI_ADV_PHY_CODED;
1811                 cp.secondary_phy = HCI_ADV_PHY_CODED;
1812         } else {
1813                 /* In all other cases use 1M */
1814                 cp.primary_phy = HCI_ADV_PHY_1M;
1815                 cp.secondary_phy = HCI_ADV_PHY_1M;
1816         }
1817
1818         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
1819
1820         if (own_addr_type == ADDR_LE_DEV_RANDOM &&
1821             bacmp(&random_addr, BDADDR_ANY)) {
1822                 struct hci_cp_le_set_adv_set_rand_addr cp;
1823
1824                 /* Check if the random address needs to be updated */
1825                 if (adv_instance) {
1826                         if (!bacmp(&random_addr, &adv_instance->random_addr))
1827                                 return 0;
1828                 } else {
1829                         if (!bacmp(&random_addr, &hdev->random_addr))
1830                                 return 0;
1831                 }
1832
1833                 memset(&cp, 0, sizeof(cp));
1834
1835                 cp.handle = 0;
1836                 bacpy(&cp.bdaddr, &random_addr);
1837
1838                 hci_req_add(req,
1839                             HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1840                             sizeof(cp), &cp);
1841         }
1842
1843         return 0;
1844 }
1845
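/* Illustrative call order (a sketch, not a new code path): extended
 * advertising for an instance is brought up in three steps, exactly as
 * __hci_req_start_ext_adv() does further below:
 *
 *	err = __hci_req_setup_ext_adv_instance(req, instance);
 *	if (!err) {
 *		__hci_req_update_scan_rsp_data(req, instance);
 *		__hci_req_enable_ext_advertising(req, instance);
 *	}
 *
 * Parameters must come first: controllers typically reject data or
 * enable operations on an advertising set that has not been configured.
 */
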
1846 int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance)
1847 {
1848         struct hci_dev *hdev = req->hdev;
1849         struct hci_cp_le_set_ext_adv_enable *cp;
1850         struct hci_cp_ext_adv_set *adv_set;
1851         u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];
1852         struct adv_info *adv_instance;
1853
1854         if (instance > 0) {
1855                 adv_instance = hci_find_adv_instance(hdev, instance);
1856                 if (!adv_instance)
1857                         return -EINVAL;
1858         } else {
1859                 adv_instance = NULL;
1860         }
1861
1862         cp = (void *) data;
1863         adv_set = (void *) cp->data;
1864
1865         memset(cp, 0, sizeof(*cp));
1866
1867         cp->enable = 0x01;
1868         cp->num_of_sets = 0x01;
1869
1870         memset(adv_set, 0, sizeof(*adv_set));
1871
1872         adv_set->handle = instance;
1873
1874         /* Set duration per instance since controller is responsible for
1875          * scheduling it.
1876          */
1877         if (adv_instance && adv_instance->duration) {
1878                 u16 duration = adv_instance->duration * MSEC_PER_SEC;
1879
1880                 /* Time = N * 10 ms */
1881                 adv_set->duration = cpu_to_le16(duration / 10);
1882         }
1883
1884         hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1885                     sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
1886                     data);
1887
1888         return 0;
1889 }
1890
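/* Worked example of the duration conversion above: a per-instance
 * duration of 2 seconds becomes 2 * MSEC_PER_SEC = 2000 ms; the
 * controller counts in units of 10 ms, so adv_set->duration ends up
 * as cpu_to_le16(200).
 */
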
1891 int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
1892 {
1893         struct hci_dev *hdev = req->hdev;
1894         int err;
1895
1896         if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1897                 __hci_req_disable_advertising(req);
1898
1899         err = __hci_req_setup_ext_adv_instance(req, instance);
1900         if (err < 0)
1901                 return err;
1902
1903         __hci_req_update_scan_rsp_data(req, instance);
1904         __hci_req_enable_ext_advertising(req, instance);
1905
1906         return 0;
1907 }
1908
1909 int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1910                                     bool force)
1911 {
1912         struct hci_dev *hdev = req->hdev;
1913         struct adv_info *adv_instance = NULL;
1914         u16 timeout;
1915
1916         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
1917             list_empty(&hdev->adv_instances))
1918                 return -EPERM;
1919
1920         if (hdev->adv_instance_timeout)
1921                 return -EBUSY;
1922
1923         adv_instance = hci_find_adv_instance(hdev, instance);
1924         if (!adv_instance)
1925                 return -ENOENT;
1926
1927         /* A zero timeout means unlimited advertising. As long as there is
1928          * only one instance, duration should be ignored. We still set a timeout
1929          * in case further instances are being added later on.
1930          *
1931          * If the remaining lifetime of the instance is more than the duration
1932          * then the timeout corresponds to the duration, otherwise it will be
1933          * reduced to the remaining instance lifetime.
1934          */
1935         if (adv_instance->timeout == 0 ||
1936             adv_instance->duration <= adv_instance->remaining_time)
1937                 timeout = adv_instance->duration;
1938         else
1939                 timeout = adv_instance->remaining_time;
1940
1941         /* The remaining time is being reduced unless the instance is being
1942          * advertised without time limit.
1943          */
1944         if (adv_instance->timeout)
1945                 adv_instance->remaining_time =
1946                                 adv_instance->remaining_time - timeout;
1947
1948         /* Only use work for scheduling instances with legacy advertising */
1949         if (!ext_adv_capable(hdev)) {
1950                 hdev->adv_instance_timeout = timeout;
1951                 queue_delayed_work(hdev->req_workqueue,
1952                                    &hdev->adv_instance_expire,
1953                                    msecs_to_jiffies(timeout * 1000));
1954         }
1955
1956         /* If we're just re-scheduling the same instance again then do not
1957          * execute any HCI commands. This happens when a single instance is
1958          * being advertised.
1959          */
1960         if (!force && hdev->cur_adv_instance == instance &&
1961             hci_dev_test_flag(hdev, HCI_LE_ADV))
1962                 return 0;
1963
1964         hdev->cur_adv_instance = instance;
1965         if (ext_adv_capable(hdev)) {
1966                 __hci_req_start_ext_adv(req, instance);
1967         } else {
1968                 __hci_req_update_adv_data(req, instance);
1969                 __hci_req_update_scan_rsp_data(req, instance);
1970                 __hci_req_enable_advertising(req);
1971         }
1972
1973         return 0;
1974 }
1975
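/* Worked example of the lifetime bookkeeping above: an instance with
 * timeout = 30 s and duration = 10 s is scheduled in three rounds of
 * timeout = duration = 10 s, with remaining_time dropping 30 -> 20 ->
 * 10 -> 0. Once remaining_time hits zero, adv_timeout_expire() lets
 * hci_req_clear_adv_instance() remove the instance for good.
 */
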
1976 static void cancel_adv_timeout(struct hci_dev *hdev)
1977 {
1978         if (hdev->adv_instance_timeout) {
1979                 hdev->adv_instance_timeout = 0;
1980                 cancel_delayed_work(&hdev->adv_instance_expire);
1981         }
1982 }
1983
1984 /* For a single instance:
1985  * - force == true: The instance will be removed even when its remaining
1986  *   lifetime is not zero.
1987  * - force == false: the instance will be deactivated but kept stored unless
1988  *   the remaining lifetime is zero.
1989  *
1990  * For instance == 0x00:
1991  * - force == true: All instances will be removed regardless of their timeout
1992  *   setting.
1993  * - force == false: Only instances that have a timeout will be removed.
1994  */
1995 void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
1996                                 struct hci_request *req, u8 instance,
1997                                 bool force)
1998 {
1999         struct adv_info *adv_instance, *n, *next_instance = NULL;
2000         int err;
2001         u8 rem_inst;
2002
2003         /* Cancel any timeout concerning the removed instance(s). */
2004         if (!instance || hdev->cur_adv_instance == instance)
2005                 cancel_adv_timeout(hdev);
2006
2007         /* Get the next instance to advertise BEFORE we remove
2008          * the current one. This can be the same instance again
2009          * if there is only one instance.
2010          */
2011         if (instance && hdev->cur_adv_instance == instance)
2012                 next_instance = hci_get_next_instance(hdev, instance);
2013
2014         if (instance == 0x00) {
2015                 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
2016                                          list) {
2017                         if (!(force || adv_instance->timeout))
2018                                 continue;
2019
2020                         rem_inst = adv_instance->instance;
2021                         err = hci_remove_adv_instance(hdev, rem_inst);
2022                         if (!err)
2023                                 mgmt_advertising_removed(sk, hdev, rem_inst);
2024                 }
2025         } else {
2026                 adv_instance = hci_find_adv_instance(hdev, instance);
2027
2028                 if (force || (adv_instance && adv_instance->timeout &&
2029                               !adv_instance->remaining_time)) {
2030                         /* Don't advertise a removed instance. */
2031                         if (next_instance &&
2032                             next_instance->instance == instance)
2033                                 next_instance = NULL;
2034
2035                         err = hci_remove_adv_instance(hdev, instance);
2036                         if (!err)
2037                                 mgmt_advertising_removed(sk, hdev, instance);
2038                 }
2039         }
2040
2041         if (!req || !hdev_is_powered(hdev) ||
2042             hci_dev_test_flag(hdev, HCI_ADVERTISING))
2043                 return;
2044
2045         if (next_instance)
2046                 __hci_req_schedule_adv_instance(req, next_instance->instance,
2047                                                 false);
2048 }
2049
2050 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
2051 {
2052         struct hci_dev *hdev = req->hdev;
2053
2054         /* If we're advertising or initiating an LE connection we can't
2055          * go ahead and change the random address at this time. This is
2056          * because the eventual initiator address used for the
2057          * subsequently created connection will be undefined (some
2058          * controllers use the new address and others the one we had
2059          * when the operation started).
2060          *
2061          * In this kind of scenario skip the update and let the random
2062          * address be updated at the next cycle.
2063          */
2064         if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
2065             hci_lookup_le_connect(hdev)) {
2066                 BT_DBG("Deferring random address update");
2067                 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2068                 return;
2069         }
2070
2071         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
2072 }
2073
2074 int hci_update_random_address(struct hci_request *req, bool require_privacy,
2075                               bool use_rpa, u8 *own_addr_type)
2076 {
2077         struct hci_dev *hdev = req->hdev;
2078         int err;
2079
2080         /* If privacy is enabled use a resolvable private address. If
2081          * the current RPA has expired or something other than the
2082          * current RPA is in use, then generate a new one.
2083          */
2084         if (use_rpa) {
2085                 int to;
2086
2087                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2088
2089                 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
2090                     !bacmp(&hdev->random_addr, &hdev->rpa))
2091                         return 0;
2092
2093                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
2094                 if (err < 0) {
2095                         bt_dev_err(hdev, "failed to generate new RPA");
2096                         return err;
2097                 }
2098
2099                 set_random_addr(req, &hdev->rpa);
2100
2101                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
2102                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
2103
2104                 return 0;
2105         }
2106
2107         /* In case of required privacy without resolvable private address,
2108          * use a non-resolvable private address. This is useful for active
2109          * scanning and non-connectable advertising.
2110          */
2111         if (require_privacy) {
2112                 bdaddr_t nrpa;
2113
2114                 while (true) {
2115                         /* The non-resolvable private address is generated
2116          * from six random bytes with the two most significant
2117                          * bits cleared.
2118                          */
2119                         get_random_bytes(&nrpa, 6);
2120                         nrpa.b[5] &= 0x3f;
2121
2122                         /* The non-resolvable private address shall not be
2123                          * equal to the public address.
2124                          */
2125                         if (bacmp(&hdev->bdaddr, &nrpa))
2126                                 break;
2127                 }
2128
2129                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2130                 set_random_addr(req, &nrpa);
2131                 return 0;
2132         }
2133
2134         /* If forcing static address is in use or there is no public
2135          * address use the static address as random address (but skip
2136          * the HCI command if the current random address is already the
2137          * static one).
2138          *
2139          * In case BR/EDR has been disabled on a dual-mode controller
2140          * and a static address has been configured, then use that
2141          * address instead of the public BR/EDR address.
2142          */
2143         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2144             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2145             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2146              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2147                 *own_addr_type = ADDR_LE_DEV_RANDOM;
2148                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
2149                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2150                                     &hdev->static_addr);
2151                 return 0;
2152         }
2153
2154         /* Neither privacy nor static address is being used so use a
2155          * public address.
2156          */
2157         *own_addr_type = ADDR_LE_DEV_PUBLIC;
2158
2159         return 0;
2160 }
2161
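/* Minimal caller sketch (assuming req was set up with hci_req_init()
 * and hdev is powered): scanning and advertising paths pick the own
 * address type like active_scan() does later in this file:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, true, scan_use_rpa(hdev),
 *				      &own_addr_type) < 0)
 *		own_addr_type = ADDR_LE_DEV_PUBLIC;
 */
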
2162 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
2163 {
2164         struct bdaddr_list *b;
2165
2166         list_for_each_entry(b, &hdev->whitelist, list) {
2167                 struct hci_conn *conn;
2168
2169                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
2170                 if (!conn)
2171                         return true;
2172
2173                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2174                         return true;
2175         }
2176
2177         return false;
2178 }
2179
2180 void __hci_req_update_scan(struct hci_request *req)
2181 {
2182         struct hci_dev *hdev = req->hdev;
2183         u8 scan;
2184
2185         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2186                 return;
2187
2188         if (!hdev_is_powered(hdev))
2189                 return;
2190
2191         if (mgmt_powering_down(hdev))
2192                 return;
2193
2194         if (hdev->scanning_paused)
2195                 return;
2196
2197         if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
2198             disconnected_whitelist_entries(hdev))
2199                 scan = SCAN_PAGE;
2200         else
2201                 scan = SCAN_DISABLED;
2202
2203         if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2204                 scan |= SCAN_INQUIRY;
2205
2206         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
2207             test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
2208                 return;
2209
2210         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2211 }
2212
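/* Worked example of the scan bitmask above: a device that is both
 * connectable and discoverable gets scan = SCAN_PAGE | SCAN_INQUIRY
 * (0x02 | 0x01 = 0x03), enabling page scan and inquiry scan with a
 * single Write Scan Enable command.
 */
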
2213 static int update_scan(struct hci_request *req, unsigned long opt)
2214 {
2215         hci_dev_lock(req->hdev);
2216         __hci_req_update_scan(req);
2217         hci_dev_unlock(req->hdev);
2218         return 0;
2219 }
2220
2221 static void scan_update_work(struct work_struct *work)
2222 {
2223         struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
2224
2225         hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
2226 }
2227
2228 static int connectable_update(struct hci_request *req, unsigned long opt)
2229 {
2230         struct hci_dev *hdev = req->hdev;
2231
2232         hci_dev_lock(hdev);
2233
2234         __hci_req_update_scan(req);
2235
2236         /* If BR/EDR is not enabled and we disable advertising as a
2237          * by-product of disabling connectable, we need to update the
2238          * advertising flags.
2239          */
2240         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2241                 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
2242
2243         /* Update the advertising parameters if necessary */
2244         if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2245             !list_empty(&hdev->adv_instances)) {
2246                 if (ext_adv_capable(hdev))
2247                         __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
2248                 else
2249                         __hci_req_enable_advertising(req);
2250         }
2251
2252         __hci_update_background_scan(req);
2253
2254         hci_dev_unlock(hdev);
2255
2256         return 0;
2257 }
2258
2259 static void connectable_update_work(struct work_struct *work)
2260 {
2261         struct hci_dev *hdev = container_of(work, struct hci_dev,
2262                                             connectable_update);
2263         u8 status;
2264
2265         hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
2266         mgmt_set_connectable_complete(hdev, status);
2267 }
2268
2269 static u8 get_service_classes(struct hci_dev *hdev)
2270 {
2271         struct bt_uuid *uuid;
2272         u8 val = 0;
2273
2274         list_for_each_entry(uuid, &hdev->uuids, list)
2275                 val |= uuid->svc_hint;
2276
2277         return val;
2278 }
2279
2280 void __hci_req_update_class(struct hci_request *req)
2281 {
2282         struct hci_dev *hdev = req->hdev;
2283         u8 cod[3];
2284
2285         BT_DBG("%s", hdev->name);
2286
2287         if (!hdev_is_powered(hdev))
2288                 return;
2289
2290         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
2291                 return;
2292
2293         if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
2294                 return;
2295
2296         cod[0] = hdev->minor_class;
2297         cod[1] = hdev->major_class;
2298         cod[2] = get_service_classes(hdev);
2299
2300         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
2301                 cod[1] |= 0x20;
2302
2303         if (memcmp(cod, hdev->dev_class, 3) == 0)
2304                 return;
2305
2306         hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
2307 }
2308
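/* Worked example of the encoding above, assuming major_class 0x01
 * (computer) and minor_class 0x0c (laptop): the command payload is the
 * octet sequence 0c 01 <services>. Limited discoverable mode ORs 0x20
 * into cod[1], i.e. bit 13 of the 24-bit class value, which is the
 * Limited Discoverable Mode service bit.
 */
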
2309 static void write_iac(struct hci_request *req)
2310 {
2311         struct hci_dev *hdev = req->hdev;
2312         struct hci_cp_write_current_iac_lap cp;
2313
2314         if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
2315                 return;
2316
2317         if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
2318                 /* Limited discoverable mode */
2319                 cp.num_iac = min_t(u8, hdev->num_iac, 2);
2320                 cp.iac_lap[0] = 0x00;   /* LIAC */
2321                 cp.iac_lap[1] = 0x8b;
2322                 cp.iac_lap[2] = 0x9e;
2323                 cp.iac_lap[3] = 0x33;   /* GIAC */
2324                 cp.iac_lap[4] = 0x8b;
2325                 cp.iac_lap[5] = 0x9e;
2326         } else {
2327                 /* General discoverable mode */
2328                 cp.num_iac = 1;
2329                 cp.iac_lap[0] = 0x33;   /* GIAC */
2330                 cp.iac_lap[1] = 0x8b;
2331                 cp.iac_lap[2] = 0x9e;
2332         }
2333
2334         hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
2335                     (cp.num_iac * 3) + 1, &cp);
2336 }
2337
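/* The LAPs written above are the standard inquiry access codes stored
 * little endian: 00 8b 9e is the LIAC (0x9e8b00) and 33 8b 9e is the
 * GIAC (0x9e8b33). In limited discoverable mode both are configured so
 * that general inquiries can still reach the device.
 */
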
2338 static int discoverable_update(struct hci_request *req, unsigned long opt)
2339 {
2340         struct hci_dev *hdev = req->hdev;
2341
2342         hci_dev_lock(hdev);
2343
2344         if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2345                 write_iac(req);
2346                 __hci_req_update_scan(req);
2347                 __hci_req_update_class(req);
2348         }
2349
2350         /* Advertising instances don't use the global discoverable setting, so
2351          * only update AD if advertising was enabled using Set Advertising.
2352          */
2353         if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2354                 __hci_req_update_adv_data(req, 0x00);
2355
2356                 /* Discoverable mode affects the local advertising
2357                  * address in limited privacy mode.
2358                  */
2359                 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
2360                         if (ext_adv_capable(hdev))
2361                                 __hci_req_start_ext_adv(req, 0x00);
2362                         else
2363                                 __hci_req_enable_advertising(req);
2364                 }
2365         }
2366
2367         hci_dev_unlock(hdev);
2368
2369         return 0;
2370 }
2371
2372 static void discoverable_update_work(struct work_struct *work)
2373 {
2374         struct hci_dev *hdev = container_of(work, struct hci_dev,
2375                                             discoverable_update);
2376         u8 status;
2377
2378         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
2379         mgmt_set_discoverable_complete(hdev, status);
2380 }
2381
2382 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
2383                       u8 reason)
2384 {
2385         switch (conn->state) {
2386         case BT_CONNECTED:
2387         case BT_CONFIG:
2388                 if (conn->type == AMP_LINK) {
2389                         struct hci_cp_disconn_phy_link cp;
2390
2391                         cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
2392                         cp.reason = reason;
2393                         hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
2394                                     &cp);
2395                 } else {
2396                         struct hci_cp_disconnect dc;
2397
2398                         dc.handle = cpu_to_le16(conn->handle);
2399                         dc.reason = reason;
2400                         hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2401                 }
2402
2403                 conn->state = BT_DISCONN;
2404
2405                 break;
2406         case BT_CONNECT:
2407                 if (conn->type == LE_LINK) {
2408                         if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2409                                 break;
2410                         hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2411                                     0, NULL);
2412                 } else if (conn->type == ACL_LINK) {
2413                         if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2414                                 break;
2415                         hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2416                                     6, &conn->dst);
2417                 }
2418                 break;
2419         case BT_CONNECT2:
2420                 if (conn->type == ACL_LINK) {
2421                         struct hci_cp_reject_conn_req rej;
2422
2423                         bacpy(&rej.bdaddr, &conn->dst);
2424                         rej.reason = reason;
2425
2426                         hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2427                                     sizeof(rej), &rej);
2428                 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2429                         struct hci_cp_reject_sync_conn_req rej;
2430
2431                         bacpy(&rej.bdaddr, &conn->dst);
2432
2433                         /* SCO rejection has its own limited set of
2434                          * allowed error values (0x0D-0x0F) which isn't
2435                          * compatible with most values passed to this
2436                          * function. To be safe hard-code one of the
2437                          * values that's suitable for SCO.
2438                          */
2439                         rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2440
2441                         hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2442                                     sizeof(rej), &rej);
2443                 }
2444                 break;
2445         default:
2446                 conn->state = BT_CLOSED;
2447                 break;
2448         }
2449 }
2450
2451 static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2452 {
2453         if (status)
2454                 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2455 }
2456
2457 int hci_abort_conn(struct hci_conn *conn, u8 reason)
2458 {
2459         struct hci_request req;
2460         int err;
2461
2462         hci_req_init(&req, conn->hdev);
2463
2464         __hci_abort_conn(&req, conn, reason);
2465
2466         err = hci_req_run(&req, abort_conn_complete);
2467         if (err && err != -ENODATA) {
2468                 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
2469                 return err;
2470         }
2471
2472         return 0;
2473 }
2474
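/* Note on the -ENODATA handling above: __hci_abort_conn() can queue no
 * command at all (for instance a BT_CONNECT LE link that is still only
 * being scanned for), and req_run() reports an empty request as
 * -ENODATA. That case is expected rather than a failure, so it is
 * deliberately not logged.
 */
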
2475 static int update_bg_scan(struct hci_request *req, unsigned long opt)
2476 {
2477         hci_dev_lock(req->hdev);
2478         __hci_update_background_scan(req);
2479         hci_dev_unlock(req->hdev);
2480         return 0;
2481 }
2482
2483 static void bg_scan_update(struct work_struct *work)
2484 {
2485         struct hci_dev *hdev = container_of(work, struct hci_dev,
2486                                             bg_scan_update);
2487         struct hci_conn *conn;
2488         u8 status;
2489         int err;
2490
2491         err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2492         if (!err)
2493                 return;
2494
2495         hci_dev_lock(hdev);
2496
2497         conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2498         if (conn)
2499                 hci_le_conn_failed(conn, status);
2500
2501         hci_dev_unlock(hdev);
2502 }
2503
2504 static int le_scan_disable(struct hci_request *req, unsigned long opt)
2505 {
2506         hci_req_add_le_scan_disable(req);
2507         return 0;
2508 }
2509
2510 static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2511 {
2512         u8 length = opt;
2513         const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2514         const u8 liac[3] = { 0x00, 0x8b, 0x9e };
2515         struct hci_cp_inquiry cp;
2516
2517         BT_DBG("%s", req->hdev->name);
2518
2519         hci_dev_lock(req->hdev);
2520         hci_inquiry_cache_flush(req->hdev);
2521         hci_dev_unlock(req->hdev);
2522
2523         memset(&cp, 0, sizeof(cp));
2524
2525         if (req->hdev->discovery.limited)
2526                 memcpy(&cp.lap, liac, sizeof(cp.lap));
2527         else
2528                 memcpy(&cp.lap, giac, sizeof(cp.lap));
2529
2530         cp.length = length;
2531
2532         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2533
2534         return 0;
2535 }
2536
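/* Worked example of the inquiry length above: cp.length is in units of
 * 1.28 s, so DISCOV_BREDR_INQUIRY_LEN with its usual value of 0x08
 * yields roughly 10.24 s of inquiry before Inquiry Complete arrives.
 */
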
2537 static void le_scan_disable_work(struct work_struct *work)
2538 {
2539         struct hci_dev *hdev = container_of(work, struct hci_dev,
2540                                             le_scan_disable.work);
2541         u8 status;
2542
2543         BT_DBG("%s", hdev->name);
2544
2545         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2546                 return;
2547
2548         cancel_delayed_work(&hdev->le_scan_restart);
2549
2550         hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2551         if (status) {
2552                 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2553                            status);
2554                 return;
2555         }
2556
2557         hdev->discovery.scan_start = 0;
2558
2559         /* If we were running LE only scan, change discovery state. If
2560          * we were running both LE and BR/EDR inquiry simultaneously,
2561          * and BR/EDR inquiry is already finished, stop discovery,
2562          * otherwise BR/EDR inquiry will stop discovery when finished.
2563          * If we are resolving a remote device name, do not change
2564          * the discovery state.
2565          */
2566
2567         if (hdev->discovery.type == DISCOV_TYPE_LE)
2568                 goto discov_stopped;
2569
2570         if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2571                 return;
2572
2573         if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2574                 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2575                     hdev->discovery.state != DISCOVERY_RESOLVING)
2576                         goto discov_stopped;
2577
2578                 return;
2579         }
2580
2581         hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2582                      HCI_CMD_TIMEOUT, &status);
2583         if (status) {
2584                 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
2585                 goto discov_stopped;
2586         }
2587
2588         return;
2589
2590 discov_stopped:
2591         hci_dev_lock(hdev);
2592         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2593         hci_dev_unlock(hdev);
2594 }
2595
2596 static int le_scan_restart(struct hci_request *req, unsigned long opt)
2597 {
2598         struct hci_dev *hdev = req->hdev;
2599
2600         /* If controller is not scanning we are done. */
2601         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2602                 return 0;
2603
2604         hci_req_add_le_scan_disable(req);
2605
2606         if (use_ext_scan(hdev)) {
2607                 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2608
2609                 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2610                 ext_enable_cp.enable = LE_SCAN_ENABLE;
2611                 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2612
2613                 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2614                             sizeof(ext_enable_cp), &ext_enable_cp);
2615         } else {
2616                 struct hci_cp_le_set_scan_enable cp;
2617
2618                 memset(&cp, 0, sizeof(cp));
2619                 cp.enable = LE_SCAN_ENABLE;
2620                 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2621                 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2622         }
2623
2624         return 0;
2625 }
2626
2627 static void le_scan_restart_work(struct work_struct *work)
2628 {
2629         struct hci_dev *hdev = container_of(work, struct hci_dev,
2630                                             le_scan_restart.work);
2631         unsigned long timeout, duration, scan_start, now;
2632         u8 status;
2633
2634         BT_DBG("%s", hdev->name);
2635
2636         hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
2637         if (status) {
2638                 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2639                            status);
2640                 return;
2641         }
2642
2643         hci_dev_lock(hdev);
2644
2645         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2646             !hdev->discovery.scan_start)
2647                 goto unlock;
2648
2649         /* When the scan was started, hdev->le_scan_disable was queued to
2650          * run at scan_start + duration. During a scan restart that job
2651          * has been canceled, so we need to queue it again with the proper
2652          * remaining timeout to make sure the scan does not run indefinitely.
2653          */
2654         duration = hdev->discovery.scan_duration;
2655         scan_start = hdev->discovery.scan_start;
2656         now = jiffies;
2657         if (now - scan_start <= duration) {
2658                 int elapsed;
2659
2660                 if (now >= scan_start)
2661                         elapsed = now - scan_start;
2662                 else
2663                         elapsed = ULONG_MAX - scan_start + now;
2664
2665                 timeout = duration - elapsed;
2666         } else {
2667                 timeout = 0;
2668         }
2669
2670         queue_delayed_work(hdev->req_workqueue,
2671                            &hdev->le_scan_disable, timeout);
2672
2673 unlock:
2674         hci_dev_unlock(hdev);
2675 }
2676
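/* Worked example of the wraparound math above: if jiffies overflowed
 * between scan_start and now (so now < scan_start), the elapsed time
 * is rebuilt as ULONG_MAX - scan_start + now, i.e. the ticks up to the
 * wrap plus the ticks after it, keeping the remaining timeout accurate
 * across the overflow.
 */
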
2677 static int active_scan(struct hci_request *req, unsigned long opt)
2678 {
2679         uint16_t interval = opt;
2680         struct hci_dev *hdev = req->hdev;
2681         u8 own_addr_type;
2682         int err;
2683
2684         BT_DBG("%s", hdev->name);
2685
2686         if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2687                 hci_dev_lock(hdev);
2688
2689                 /* Don't let discovery abort an outgoing connection attempt
2690                  * that's using directed advertising.
2691                  */
2692                 if (hci_lookup_le_connect(hdev)) {
2693                         hci_dev_unlock(hdev);
2694                         return -EBUSY;
2695                 }
2696
2697                 cancel_adv_timeout(hdev);
2698                 hci_dev_unlock(hdev);
2699
2700                 __hci_req_disable_advertising(req);
2701         }
2702
2703         /* If controller is scanning, it means the background scanning is
2704          * running. Thus, we should temporarily stop it in order to set the
2705          * discovery scanning parameters.
2706          */
2707         if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2708                 hci_req_add_le_scan_disable(req);
2709
2710         /* All active scans will be done with either a resolvable private
2711          * address (when privacy feature has been enabled) or non-resolvable
2712          * address (when the privacy feature has been enabled) or a
2713          * non-resolvable private address.
2714         err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2715                                         &own_addr_type);
2716         if (err < 0)
2717                 own_addr_type = ADDR_LE_DEV_PUBLIC;
2718
2719         hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2720                            own_addr_type, 0);
2721         return 0;
2722 }
2723
2724 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2725 {
2726         int err;
2727
2728         BT_DBG("%s", req->hdev->name);
2729
2730         err = active_scan(req, opt);
2731         if (err)
2732                 return err;
2733
2734         return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2735 }
2736
2737 static void start_discovery(struct hci_dev *hdev, u8 *status)
2738 {
2739         unsigned long timeout;
2740
2741         BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2742
2743         switch (hdev->discovery.type) {
2744         case DISCOV_TYPE_BREDR:
2745                 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2746                         hci_req_sync(hdev, bredr_inquiry,
2747                                      DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2748                                      status);
2749                 return;
2750         case DISCOV_TYPE_INTERLEAVED:
2751                 /* When running simultaneous discovery, the LE scanning time
2752                  * should occupy the whole discovery time since BR/EDR inquiry
2753                  * and LE scanning are scheduled by the controller.
2754                  *
2755                  * For interleaving discovery in comparison, BR/EDR inquiry
2756                  * and LE scanning are done sequentially with separate
2757                  * timeouts.
2758                  */
2759                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2760                              &hdev->quirks)) {
2761                         timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2762                         /* During simultaneous discovery, we double LE scan
2763                          * interval. We must leave some time for the controller
2764                          * to do BR/EDR inquiry.
2765                          */
2766                         hci_req_sync(hdev, interleaved_discov,
2767                                      DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2768                                      status);
2769                         break;
2770                 }
2771
2772                 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2773                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2774                              HCI_CMD_TIMEOUT, status);
2775                 break;
2776         case DISCOV_TYPE_LE:
2777                 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2778                 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2779                              HCI_CMD_TIMEOUT, status);
2780                 break;
2781         default:
2782                 *status = HCI_ERROR_UNSPECIFIED;
2783                 return;
2784         }
2785
2786         if (*status)
2787                 return;
2788
2789         BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2790
2791         /* When service discovery is used and the controller has a
2792          * strict duplicate filter, it is important to remember the
2793          * start and duration of the scan. This is required for
2794          * restarting scanning during the discovery phase.
2795          */
2796         if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2797             hdev->discovery.result_filtering) {
2798                 hdev->discovery.scan_start = jiffies;
2799                 hdev->discovery.scan_duration = timeout;
2800         }
2801
2802         queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2803                            timeout);
2804 }
2805
2806 bool hci_req_stop_discovery(struct hci_request *req)
2807 {
2808         struct hci_dev *hdev = req->hdev;
2809         struct discovery_state *d = &hdev->discovery;
2810         struct hci_cp_remote_name_req_cancel cp;
2811         struct inquiry_entry *e;
2812         bool ret = false;
2813
2814         BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2815
2816         if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2817                 if (test_bit(HCI_INQUIRY, &hdev->flags))
2818                         hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2819
2820                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2821                         cancel_delayed_work(&hdev->le_scan_disable);
2822                         hci_req_add_le_scan_disable(req);
2823                 }
2824
2825                 ret = true;
2826         } else {
2827                 /* Passive scanning */
2828                 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2829                         hci_req_add_le_scan_disable(req);
2830                         ret = true;
2831                 }
2832         }
2833
2834         /* No further actions needed for LE-only discovery */
2835         if (d->type == DISCOV_TYPE_LE)
2836                 return ret;
2837
2838         if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2839                 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2840                                                      NAME_PENDING);
2841                 if (!e)
2842                         return ret;
2843
2844                 bacpy(&cp.bdaddr, &e->data.bdaddr);
2845                 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2846                             &cp);
2847                 ret = true;
2848         }
2849
2850         return ret;
2851 }
2852
2853 static int stop_discovery(struct hci_request *req, unsigned long opt)
2854 {
2855         hci_dev_lock(req->hdev);
2856         hci_req_stop_discovery(req);
2857         hci_dev_unlock(req->hdev);
2858
2859         return 0;
2860 }
2861
2862 static void discov_update(struct work_struct *work)
2863 {
2864         struct hci_dev *hdev = container_of(work, struct hci_dev,
2865                                             discov_update);
2866         u8 status = 0;
2867
2868         switch (hdev->discovery.state) {
2869         case DISCOVERY_STARTING:
2870                 start_discovery(hdev, &status);
2871                 mgmt_start_discovery_complete(hdev, status);
2872                 if (status)
2873                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2874                 else
2875                         hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2876                 break;
2877         case DISCOVERY_STOPPING:
2878                 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2879                 mgmt_stop_discovery_complete(hdev, status);
2880                 if (!status)
2881                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2882                 break;
2883         case DISCOVERY_STOPPED:
2884         default:
2885                 return;
2886         }
2887 }
2888
2889 static void discov_off(struct work_struct *work)
2890 {
2891         struct hci_dev *hdev = container_of(work, struct hci_dev,
2892                                             discov_off.work);
2893
2894         BT_DBG("%s", hdev->name);
2895
2896         hci_dev_lock(hdev);
2897
2898         /* When the discoverable timeout triggers, just make sure
2899          * the limited discoverable flag is cleared. Even in the case
2900          * of a timeout triggered from general discoverable, it is
2901          * safe to unconditionally clear the flag.
2902          */
2903         hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2904         hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2905         hdev->discov_timeout = 0;
2906
2907         hci_dev_unlock(hdev);
2908
2909         hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2910         mgmt_new_settings(hdev);
2911 }
2912
2913 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2914 {
2915         struct hci_dev *hdev = req->hdev;
2916         u8 link_sec;
2917
2918         hci_dev_lock(hdev);
2919
2920         if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2921             !lmp_host_ssp_capable(hdev)) {
2922                 u8 mode = 0x01;
2923
2924                 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2925
2926                 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2927                         u8 support = 0x01;
2928
2929                         hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2930                                     sizeof(support), &support);
2931                 }
2932         }
2933
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			int err;

			if (ext_adv_capable(hdev)) {
				err = __hci_req_setup_ext_adv_instance(req,
								       0x00);
				if (!err)
					__hci_req_update_scan_rsp_data(req,
								       0x00);
			} else {
				err = 0;
				__hci_req_update_adv_data(req, 0x00);
				__hci_req_update_scan_rsp_data(req, 0x00);
			}

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
				if (!ext_adv_capable(hdev))
					__hci_req_enable_advertising(req);
				else if (!err)
					__hci_req_enable_ext_advertising(req,
									 0x00);
			}
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

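	/* Keep the controller's authentication-enable setting in sync with
	 * the HCI_LINK_SECURITY flag.
	 */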
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

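	/* For BR/EDR capable controllers, refresh the fast-connectable page
	 * scan parameters, the scan enable setting, class of device, local
	 * name and EIR data.
	 */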
	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

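/* Bind the deferred work items used by the request helpers in this file
 * to their handler functions; intended to run once while the controller
 * is being set up.
 */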
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

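/* Cancel a pending synchronous request and flush all work items set up in
 * hci_request_setup(). The advertising instance timer is only cancelled
 * when a timeout is actually pending, and hdev->adv_instance_timeout is
 * reset afterwards.
 */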
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}