Commit | Line | Data |
---|---|---|
0857dd3b JH |
1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux | |
3 | ||
4 | Copyright (C) 2014 Intel Corporation | |
5 | ||
6 | This program is free software; you can redistribute it and/or modify | |
7 | it under the terms of the GNU General Public License version 2 as | |
8 | published by the Free Software Foundation; | |
9 | ||
10 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
11 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
12 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. | |
13 | IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY | |
14 | CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES | |
15 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
16 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
17 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
18 | ||
19 | ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, | |
20 | COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS | |
21 | SOFTWARE IS DISCLAIMED. | |
22 | */ | |
23 | ||
174cd4b1 IM |
24 | #include <linux/sched/signal.h> |
25 | ||
0857dd3b JH |
26 | #include <net/bluetooth/bluetooth.h> |
27 | #include <net/bluetooth/hci_core.h> | |
f2252570 | 28 | #include <net/bluetooth/mgmt.h> |
0857dd3b JH |
29 | |
30 | #include "smp.h" | |
31 | #include "hci_request.h" | |
bf6a4e30 | 32 | #include "msft.h" |
01ce70b0 | 33 | #include "eir.h" |
0857dd3b JH |
34 | |
35 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev) | |
36 | { | |
37 | skb_queue_head_init(&req->cmd_q); | |
38 | req->hdev = hdev; | |
39 | req->err = 0; | |
40 | } | |
41 | ||
f17d858e JK |
42 | void hci_req_purge(struct hci_request *req) |
43 | { | |
44 | skb_queue_purge(&req->cmd_q); | |
45 | } | |
46 | ||
f80c5dad JPRV |
47 | bool hci_req_status_pend(struct hci_dev *hdev) |
48 | { | |
49 | return hdev->req_status == HCI_REQ_PEND; | |
50 | } | |
51 | ||
e6214487 JH |
/* Submit every command queued on @req to the controller command queue.
 *
 * Exactly one of @complete / @complete_skb is recorded on the last
 * command so completion of the whole request can be signalled.
 * Returns 0 on success, req->err if building the request failed, or
 * -ENODATA for an empty request.
 */
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is attached to the last command only */
	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	/* Splice onto the device command queue atomically, then kick the
	 * command worker to start transmission.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
89 | ||
e6214487 JH |
90 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete) |
91 | { | |
92 | return req_run(req, complete, NULL); | |
93 | } | |
94 | ||
95 | int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) | |
96 | { | |
97 | return req_run(req, NULL, complete); | |
98 | } | |
99 | ||
161510cc LAD |
/* Completion handler for the synchronous request machinery.
 *
 * Records @result (and, when provided, a reference to the response
 * @skb) on @hdev and wakes the waiter in __hci_req_sync().
 */
void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
			   struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb) {
			/* Drop any stale response before taking a new ref */
			kfree_skb(hdev->req_skb);
			hdev->req_skb = skb_get(skb);
		}
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115 | ||
be91cd05 | 116 | /* Execute request and wait for completion. */ |
a1d01db1 JH |
117 | int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, |
118 | unsigned long opt), | |
4ebeee2d | 119 | unsigned long opt, u32 timeout, u8 *hci_status) |
be91cd05 JH |
120 | { |
121 | struct hci_request req; | |
be91cd05 JH |
122 | int err = 0; |
123 | ||
22fbcfc5 | 124 | bt_dev_dbg(hdev, "start"); |
be91cd05 JH |
125 | |
126 | hci_req_init(&req, hdev); | |
127 | ||
128 | hdev->req_status = HCI_REQ_PEND; | |
129 | ||
a1d01db1 JH |
130 | err = func(&req, opt); |
131 | if (err) { | |
132 | if (hci_status) | |
133 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
134 | return err; | |
135 | } | |
be91cd05 | 136 | |
be91cd05 JH |
137 | err = hci_req_run_skb(&req, hci_req_sync_complete); |
138 | if (err < 0) { | |
139 | hdev->req_status = 0; | |
140 | ||
be91cd05 JH |
141 | /* ENODATA means the HCI request command queue is empty. |
142 | * This can happen when a request with conditionals doesn't | |
143 | * trigger any commands to be sent. This is normal behavior | |
144 | * and should not trigger an error return. | |
145 | */ | |
568f44f6 JH |
146 | if (err == -ENODATA) { |
147 | if (hci_status) | |
148 | *hci_status = 0; | |
be91cd05 | 149 | return 0; |
568f44f6 JH |
150 | } |
151 | ||
152 | if (hci_status) | |
153 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
154 | |
155 | return err; | |
156 | } | |
157 | ||
67d8cee4 JK |
158 | err = wait_event_interruptible_timeout(hdev->req_wait_q, |
159 | hdev->req_status != HCI_REQ_PEND, timeout); | |
be91cd05 | 160 | |
67d8cee4 | 161 | if (err == -ERESTARTSYS) |
be91cd05 JH |
162 | return -EINTR; |
163 | ||
164 | switch (hdev->req_status) { | |
165 | case HCI_REQ_DONE: | |
166 | err = -bt_to_errno(hdev->req_result); | |
4ebeee2d JH |
167 | if (hci_status) |
168 | *hci_status = hdev->req_result; | |
be91cd05 JH |
169 | break; |
170 | ||
171 | case HCI_REQ_CANCELED: | |
172 | err = -hdev->req_result; | |
4ebeee2d JH |
173 | if (hci_status) |
174 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
175 | break; |
176 | ||
177 | default: | |
178 | err = -ETIMEDOUT; | |
4ebeee2d JH |
179 | if (hci_status) |
180 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
181 | break; |
182 | } | |
183 | ||
9afee949 FD |
184 | kfree_skb(hdev->req_skb); |
185 | hdev->req_skb = NULL; | |
be91cd05 JH |
186 | hdev->req_status = hdev->req_result = 0; |
187 | ||
22fbcfc5 | 188 | bt_dev_dbg(hdev, "end: err %d", err); |
be91cd05 JH |
189 | |
190 | return err; | |
191 | } | |
192 | ||
a1d01db1 JH |
193 | int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, |
194 | unsigned long opt), | |
4ebeee2d | 195 | unsigned long opt, u32 timeout, u8 *hci_status) |
be91cd05 JH |
196 | { |
197 | int ret; | |
198 | ||
be91cd05 | 199 | /* Serialize all requests */ |
b504430c | 200 | hci_req_sync_lock(hdev); |
e2cb6b89 LM |
201 | /* check the state after obtaing the lock to protect the HCI_UP |
202 | * against any races from hci_dev_do_close when the controller | |
203 | * gets removed. | |
204 | */ | |
205 | if (test_bit(HCI_UP, &hdev->flags)) | |
206 | ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); | |
207 | else | |
208 | ret = -ENETDOWN; | |
b504430c | 209 | hci_req_sync_unlock(hdev); |
be91cd05 JH |
210 | |
211 | return ret; | |
212 | } | |
213 | ||
0857dd3b JH |
/* Allocate and fill an skb carrying a single HCI command.
 *
 * The skb holds the command header (opcode + parameter length) followed
 * by @plen bytes copied from @param. Returns NULL if allocation fails.
 */
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	/* GFP_ATOMIC: callers may run in non-sleepable context */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	/* NOTE(review): hdr->plen looks narrower than the u32 @plen —
	 * presumably callers stay within the HCI parameter length limit;
	 * verify against struct hci_command_hdr.
	 */
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}
239 | ||
240 | /* Queue a command to an asynchronous HCI request */ | |
241 | void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, | |
242 | const void *param, u8 event) | |
243 | { | |
244 | struct hci_dev *hdev = req->hdev; | |
245 | struct sk_buff *skb; | |
246 | ||
22fbcfc5 | 247 | bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); |
0857dd3b JH |
248 | |
249 | /* If an error occurred during request building, there is no point in | |
250 | * queueing the HCI command. We can simply return. | |
251 | */ | |
252 | if (req->err) | |
253 | return; | |
254 | ||
255 | skb = hci_prepare_cmd(hdev, opcode, plen, param); | |
256 | if (!skb) { | |
2064ee33 MH |
257 | bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", |
258 | opcode); | |
0857dd3b JH |
259 | req->err = -ENOMEM; |
260 | return; | |
261 | } | |
262 | ||
263 | if (skb_queue_empty(&req->cmd_q)) | |
44d27137 | 264 | bt_cb(skb)->hci.req_flags |= HCI_REQ_START; |
0857dd3b | 265 | |
edcb185f | 266 | hci_skb_event(skb) = event; |
0857dd3b JH |
267 | |
268 | skb_queue_tail(&req->cmd_q, skb); | |
269 | } | |
270 | ||
271 | void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, | |
272 | const void *param) | |
273 | { | |
696bd362 | 274 | bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode); |
0857dd3b JH |
275 | hci_req_add_ev(req, opcode, plen, param, 0); |
276 | } | |
277 | ||
c4f1f408 HC |
278 | static void start_interleave_scan(struct hci_dev *hdev) |
279 | { | |
280 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; | |
281 | queue_delayed_work(hdev->req_workqueue, | |
282 | &hdev->interleave_scan, 0); | |
283 | } | |
284 | ||
285 | static bool is_interleave_scanning(struct hci_dev *hdev) | |
286 | { | |
287 | return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; | |
288 | } | |
289 | ||
290 | static void cancel_interleave_scan(struct hci_dev *hdev) | |
291 | { | |
292 | bt_dev_dbg(hdev, "cancelling interleave scan"); | |
293 | ||
294 | cancel_delayed_work_sync(&hdev->interleave_scan); | |
295 | ||
296 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; | |
297 | } | |
298 | ||
/* Return true if interleave_scan wasn't started until exiting this function,
 * otherwise, return false
 */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
	 */
	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
				!(list_empty(&hdev->pend_le_conns) &&
				  list_empty(&hdev->pend_le_reports)) &&
				hci_get_adv_monitor_offload_ext(hdev) ==
				    HCI_ADV_MONITOR_EXT_NONE;
	bool is_interleaving = is_interleave_scanning(hdev);

	/* Start interleaving when wanted but not yet running */
	if (use_interleaving && !is_interleaving) {
		start_interleave_scan(hdev);
		bt_dev_dbg(hdev, "starting interleave scan");
		return true;
	}

	/* Stop it when running but no longer wanted */
	if (!use_interleaving && is_interleaving)
		cancel_interleave_scan(hdev);

	return false;
}
329 | ||
/* Queue commands to stop LE scanning and, unless an RPA-based LE
 * connection attempt (@rpa_le_conn) still needs it, disable controller
 * based address resolution. No-op while scanning is paused for suspend.
 */
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Extended scanning needs the extended variant of the command */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	/* Disable address resolution */
	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
		__u8 enable = 0x00;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}
}
361 | ||
3d4f9c00 AP |
362 | static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, |
363 | u8 bdaddr_type) | |
dd522a74 | 364 | { |
3d4f9c00 | 365 | struct hci_cp_le_del_from_accept_list cp; |
dd522a74 APS |
366 | |
367 | cp.bdaddr_type = bdaddr_type; | |
368 | bacpy(&cp.bdaddr, bdaddr); | |
369 | ||
3d4f9c00 | 370 | bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, |
dd522a74 | 371 | cp.bdaddr_type); |
3d4f9c00 | 372 | hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); |
0eee35bd | 373 | |
ad383c2c | 374 | if (use_ll_privacy(req->hdev)) { |
0eee35bd MH |
375 | struct smp_irk *irk; |
376 | ||
377 | irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); | |
378 | if (irk) { | |
379 | struct hci_cp_le_del_from_resolv_list cp; | |
380 | ||
381 | cp.bdaddr_type = bdaddr_type; | |
382 | bacpy(&cp.bdaddr, bdaddr); | |
383 | ||
384 | hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, | |
385 | sizeof(cp), &cp); | |
386 | } | |
387 | } | |
dd522a74 APS |
388 | } |
389 | ||
3d4f9c00 AP |
390 | /* Adds connection to accept list if needed. On error, returns -1. */ |
391 | static int add_to_accept_list(struct hci_request *req, | |
392 | struct hci_conn_params *params, u8 *num_entries, | |
393 | bool allow_rpa) | |
0857dd3b | 394 | { |
3d4f9c00 | 395 | struct hci_cp_le_add_to_accept_list cp; |
dd522a74 APS |
396 | struct hci_dev *hdev = req->hdev; |
397 | ||
3d4f9c00 AP |
398 | /* Already in accept list */ |
399 | if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, | |
dd522a74 APS |
400 | params->addr_type)) |
401 | return 0; | |
0857dd3b | 402 | |
dd522a74 | 403 | /* Select filter policy to accept all advertising */ |
3d4f9c00 | 404 | if (*num_entries >= hdev->le_accept_list_size) |
dd522a74 APS |
405 | return -1; |
406 | ||
3d4f9c00 | 407 | /* Accept list can not be used with RPAs */ |
1fb17dfc SN |
408 | if (!allow_rpa && |
409 | !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && | |
dd522a74 APS |
410 | hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { |
411 | return -1; | |
412 | } | |
413 | ||
3d4f9c00 | 414 | /* During suspend, only wakeable devices can be in accept list */ |
fe92ee64 | 415 | if (hdev->suspended && |
e1cff700 | 416 | !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) |
dd522a74 APS |
417 | return 0; |
418 | ||
419 | *num_entries += 1; | |
0857dd3b JH |
420 | cp.bdaddr_type = params->addr_type; |
421 | bacpy(&cp.bdaddr, ¶ms->addr); | |
422 | ||
3d4f9c00 | 423 | bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, |
dd522a74 | 424 | cp.bdaddr_type); |
3d4f9c00 | 425 | hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); |
dd522a74 | 426 | |
ad383c2c | 427 | if (use_ll_privacy(hdev)) { |
0eee35bd MH |
428 | struct smp_irk *irk; |
429 | ||
430 | irk = hci_find_irk_by_addr(hdev, ¶ms->addr, | |
431 | params->addr_type); | |
432 | if (irk) { | |
433 | struct hci_cp_le_add_to_resolv_list cp; | |
434 | ||
435 | cp.bdaddr_type = params->addr_type; | |
436 | bacpy(&cp.bdaddr, ¶ms->addr); | |
437 | memcpy(cp.peer_irk, irk->val, 16); | |
438 | ||
439 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) | |
440 | memcpy(cp.local_irk, hdev->irk, 16); | |
441 | else | |
442 | memset(cp.local_irk, 0, 16); | |
443 | ||
444 | hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, | |
445 | sizeof(cp), &cp); | |
446 | } | |
447 | } | |
448 | ||
dd522a74 | 449 | return 0; |
0857dd3b JH |
450 | } |
451 | ||
/* Rebuild the controller accept list to match the pending LE connection
 * and report lists. Returns the scan filter policy to use: 0x01 when
 * the accept list can be used, 0x00 when scanning must run unfiltered.
 */
static u8 update_accept_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 num_entries = 0;
	bool pend_conn, pend_report;
	/* We allow usage of accept list even with RPAs in suspend. In the worst
	 * case, we won't be able to wake from devices that use the privacy1.2
	 * features. Additionally, once we support privacy1.2 and IRK
	 * offloading, we can update this to also check for those conditions.
	 */
	bool allow_rpa = hdev->suspended;

	if (use_ll_privacy(hdev))
		allow_rpa = true;

	/* Go through the current accept list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_accept_list, list) {
		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
						      &b->bdaddr,
						      b->bdaddr_type);
		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
							&b->bdaddr,
							b->bdaddr_type);

		/* If the device is not likely to connect or report,
		 * remove it from the accept list.
		 */
		if (!pend_conn && !pend_report) {
			del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
			continue;
		}

		/* Accept list can not be used with RPAs */
		if (!allow_rpa &&
		    !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
		    hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			return 0x00;
		}

		num_entries++;
	}

	/* Since all no longer valid accept list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available accept list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * accept list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * accept list if there is still space. Abort if space runs out.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
			return 0x00;
	}

	/* Use the allowlist unless the following conditions are all true:
	 * - We are not currently suspending
	 * - There are 1 or more ADV monitors registered and it's not offloaded
	 * - Interleaved scanning is not currently using the allowlist
	 */
	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
		return 0x00;

	/* Select filter policy to use accept list */
	return 0x01;
}
538 | ||
82a37ade JH |
539 | static bool scan_use_rpa(struct hci_dev *hdev) |
540 | { | |
541 | return hci_dev_test_flag(hdev, HCI_PRIVACY); | |
542 | } | |
543 | ||
/* Queue the commands that program scan parameters and enable LE scan.
 *
 * Picks the extended or legacy command set based on controller support;
 * with extended scanning, a PHY parameter entry is emitted for 1M/2M
 * and for Coded as supported. @addr_resolv requests that controller
 * address resolution be enabled first. No-op while scanning is paused
 * for suspend.
 */
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy,
			       bool filter_dup, bool addr_resolv)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	if (use_ll_privacy(hdev) && addr_resolv) {
		u8 enable = 0x01;

		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
	}

	/* Use ext scanning if set ext scan param and ext scan enable is
	 * supported
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		/* Room for the parameter block plus one PHY entry each for
		 * 1M and Coded.
		 */
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		/* plen tracks how much of data[] is actually used */
		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = filter_dup;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = filter_dup;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}
633 | ||
3fe318ee BG |
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
/* Choose and, when necessary, program the own-address type to use.
 *
 * Sets *@own_addr_type and may queue an HCI_OP_LE_SET_RANDOM_ADDR
 * command on @req. Returns 0 on success or a negative error when a new
 * RPA could not be generated.
 */
static int hci_update_random_address(struct hci_request *req,
				     bool require_privacy, bool use_rpa,
				     u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		/* If Controller supports LL Privacy use own address type is
		 * 0x03
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Current RPA still valid: nothing more to do */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
723 | ||
e1d57235 MH |
/* Ensure to call hci_req_add_le_scan_disable() first to disable the
 * controller based address resolution to be able to reconfigure
 * resolving list.
 *
 * Queues the full passive-scan setup: own address selection, accept
 * list reconciliation, filter policy, window/interval choice and the
 * scan enable commands.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;
	u16 window, interval;
	/* Default is to enable duplicates filter */
	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	/* Background scanning should run with address resolution */
	bool addr_resolv = true;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return;
	}

	/* Set require_privacy to false since no SCAN_REQ are send
	 * during passive scanning. Not using an non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Interleaved scanning (for ADV monitors) takes over if started */
	if (hdev->enable_advmon_interleave_scan &&
	    __hci_update_interleaved_scan(hdev))
		return;

	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
	/* Adding or removing entries from the accept list must
	 * happen before enabling scanning. The controller does
	 * not allow accept list modification while scanning.
	 */
	filter_policy = update_accept_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter polices 0x00 (no accept list)
	 * and 0x01 (accept list enabled) use the new filter policies
	 * 0x02 (no accept list) and 0x03 (accept list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	/* Pick the scan window/interval pair for the current mode */
	if (hdev->suspended) {
		window = hdev->le_scan_window_suspend;
		interval = hdev->le_scan_int_suspend;
	} else if (hci_is_le_conn_scanning(hdev)) {
		window = hdev->le_scan_window_connect;
		interval = hdev->le_scan_int_connect;
	} else if (hci_is_adv_monitoring(hdev)) {
		window = hdev->le_scan_window_adv_monitor;
		interval = hdev->le_scan_int_adv_monitor;

		/* Disable duplicates filter when scanning for advertisement
		 * monitor for the following reasons.
		 *
		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
		 * controllers ignore RSSI_Sampling_Period when the duplicates
		 * filter is enabled.
		 *
		 * For SW pattern filtering, when we're not doing interleaved
		 * scanning, it is necessary to disable duplicates filter,
		 * otherwise hosts can only receive one advertisement and it's
		 * impossible to know if a peer is still in range.
		 */
		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
	} else {
		window = hdev->le_scan_window;
		interval = hdev->le_scan_interval;
	}

	bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
		   filter_policy);
	hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
			   own_addr_type, filter_policy, filter_dup,
			   addr_resolv);
}
812 | ||
c4f1f408 HC |
/* Request builder that restarts passive scanning and flips the
 * interleave state machine between the allowlist and no-filter phases.
 * Returns 0 on success, -1 if the state machine was not running.
 */
static int hci_req_add_le_interleaved_scan(struct hci_request *req,
					   unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	int ret = 0;

	hci_dev_lock(hdev);

	/* Restart passive scan so the new filter policy takes effect */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req, false);
	hci_req_add_le_passive_scan(req);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		/* NOTE(review): the debug string names the phase being left,
		 * not the one being entered — confirm this is intentional.
		 */
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		BT_ERR("unexpected error");
		ret = -1;
	}

	hci_dev_unlock(hdev);

	return ret;
}
843 | ||
844 | static void interleave_scan_work(struct work_struct *work) | |
845 | { | |
846 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
847 | interleave_scan.work); | |
848 | u8 status; | |
849 | unsigned long timeout; | |
850 | ||
851 | if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { | |
852 | timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); | |
853 | } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { | |
854 | timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); | |
855 | } else { | |
856 | bt_dev_err(hdev, "unexpected error"); | |
857 | return; | |
858 | } | |
859 | ||
860 | hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, | |
861 | HCI_CMD_TIMEOUT, &status); | |
862 | ||
863 | /* Don't continue interleaving if it was canceled */ | |
864 | if (is_interleave_scanning(hdev)) | |
865 | queue_delayed_work(hdev->req_workqueue, | |
866 | &hdev->interleave_scan, timeout); | |
867 | } | |
868 | ||
c45074d6 LAD |
/* Queue a command to program @rpa as the controller's random address,
 * unless doing so now would race with advertising or an outgoing LE
 * connection attempt.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
892 | ||
5fc16cc4 JH |
893 | void hci_request_setup(struct hci_dev *hdev) |
894 | { | |
c4f1f408 | 895 | INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); |
5fc16cc4 JH |
896 | } |
897 | ||
898 | void hci_request_cancel_all(struct hci_dev *hdev) | |
899 | { | |
63298d6e | 900 | hci_cmd_sync_cancel_sync(hdev, ENODEV); |
7df0f73e | 901 | |
c4f1f408 | 902 | cancel_interleave_scan(hdev); |
5fc16cc4 | 903 | } |