Commit | Line | Data |
---|---|---|
0857dd3b JH |
1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux | |
3 | ||
4 | Copyright (C) 2014 Intel Corporation | |
5 | ||
6 | This program is free software; you can redistribute it and/or modify | |
7 | it under the terms of the GNU General Public License version 2 as | |
8 | published by the Free Software Foundation; | |
9 | ||
10 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | |
11 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
12 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. | |
13 | IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY | |
14 | CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES | |
15 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | |
16 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | |
17 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
18 | ||
19 | ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, | |
20 | COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS | |
21 | SOFTWARE IS DISCLAIMED. | |
22 | */ | |
23 | ||
b1a8917c JH |
24 | #include <asm/unaligned.h> |
25 | ||
0857dd3b JH |
26 | #include <net/bluetooth/bluetooth.h> |
27 | #include <net/bluetooth/hci_core.h> | |
f2252570 | 28 | #include <net/bluetooth/mgmt.h> |
0857dd3b JH |
29 | |
30 | #include "smp.h" | |
31 | #include "hci_request.h" | |
32 | ||
be91cd05 JH |
33 | #define HCI_REQ_DONE 0 |
34 | #define HCI_REQ_PEND 1 | |
35 | #define HCI_REQ_CANCELED 2 | |
36 | ||
0857dd3b JH |
37 | void hci_req_init(struct hci_request *req, struct hci_dev *hdev) |
38 | { | |
39 | skb_queue_head_init(&req->cmd_q); | |
40 | req->hdev = hdev; | |
41 | req->err = 0; | |
42 | } | |
43 | ||
e6214487 JH |
44 | static int req_run(struct hci_request *req, hci_req_complete_t complete, |
45 | hci_req_complete_skb_t complete_skb) | |
0857dd3b JH |
46 | { |
47 | struct hci_dev *hdev = req->hdev; | |
48 | struct sk_buff *skb; | |
49 | unsigned long flags; | |
50 | ||
51 | BT_DBG("length %u", skb_queue_len(&req->cmd_q)); | |
52 | ||
53 | /* If an error occurred during request building, remove all HCI | |
54 | * commands queued on the HCI request queue. | |
55 | */ | |
56 | if (req->err) { | |
57 | skb_queue_purge(&req->cmd_q); | |
58 | return req->err; | |
59 | } | |
60 | ||
61 | /* Do not allow empty requests */ | |
62 | if (skb_queue_empty(&req->cmd_q)) | |
63 | return -ENODATA; | |
64 | ||
65 | skb = skb_peek_tail(&req->cmd_q); | |
44d27137 JH |
66 | if (complete) { |
67 | bt_cb(skb)->hci.req_complete = complete; | |
68 | } else if (complete_skb) { | |
69 | bt_cb(skb)->hci.req_complete_skb = complete_skb; | |
70 | bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; | |
71 | } | |
0857dd3b JH |
72 | |
73 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); | |
74 | skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); | |
75 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); | |
76 | ||
77 | queue_work(hdev->workqueue, &hdev->cmd_work); | |
78 | ||
79 | return 0; | |
80 | } | |
81 | ||
e6214487 JH |
82 | int hci_req_run(struct hci_request *req, hci_req_complete_t complete) |
83 | { | |
84 | return req_run(req, complete, NULL); | |
85 | } | |
86 | ||
87 | int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) | |
88 | { | |
89 | return req_run(req, NULL, complete); | |
90 | } | |
91 | ||
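hci_req_init(), hci_req_add() (defined further down) and hci_req_run() together form the asynchronous request API: commands are collected on req->cmd_q and then spliced onto hdev->cmd_q in one go by req_run(). A minimal caller-side sketch, assuming the declarations from hci_request.h and hci_core.h included at the top of this file; the example_* names are illustrative, not taken from this file:

```c
static void example_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name, opcode, status);
}

static int example_write_scan_enable(struct hci_dev *hdev)
{
        struct hci_request req;
        u8 scan = SCAN_PAGE;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

        /* 0 on success, -ENODATA if nothing was queued, or the error
         * recorded by the request builder.
         */
        return hci_req_run(&req, example_complete);
}
```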
be91cd05 JH |
92 | static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, |
93 | struct sk_buff *skb) | |
94 | { | |
95 | BT_DBG("%s result 0x%2.2x", hdev->name, result); | |
96 | ||
97 | if (hdev->req_status == HCI_REQ_PEND) { | |
98 | hdev->req_result = result; | |
99 | hdev->req_status = HCI_REQ_DONE; | |
100 | if (skb) | |
101 | hdev->req_skb = skb_get(skb); | |
102 | wake_up_interruptible(&hdev->req_wait_q); | |
103 | } | |
104 | } | |
105 | ||
b504430c | 106 | void hci_req_sync_cancel(struct hci_dev *hdev, int err) |
be91cd05 JH |
107 | { |
108 | BT_DBG("%s err 0x%2.2x", hdev->name, err); | |
109 | ||
110 | if (hdev->req_status == HCI_REQ_PEND) { | |
111 | hdev->req_result = err; | |
112 | hdev->req_status = HCI_REQ_CANCELED; | |
113 | wake_up_interruptible(&hdev->req_wait_q); | |
114 | } | |
115 | } | |
116 | ||
117 | struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, | |
118 | const void *param, u8 event, u32 timeout) | |
119 | { | |
120 | DECLARE_WAITQUEUE(wait, current); | |
121 | struct hci_request req; | |
122 | struct sk_buff *skb; | |
123 | int err = 0; | |
124 | ||
125 | BT_DBG("%s", hdev->name); | |
126 | ||
127 | hci_req_init(&req, hdev); | |
128 | ||
129 | hci_req_add_ev(&req, opcode, plen, param, event); | |
130 | ||
131 | hdev->req_status = HCI_REQ_PEND; | |
132 | ||
133 | add_wait_queue(&hdev->req_wait_q, &wait); | |
134 | set_current_state(TASK_INTERRUPTIBLE); | |
135 | ||
136 | err = hci_req_run_skb(&req, hci_req_sync_complete); | |
137 | if (err < 0) { | |
138 | remove_wait_queue(&hdev->req_wait_q, &wait); | |
139 | set_current_state(TASK_RUNNING); | |
140 | return ERR_PTR(err); | |
141 | } | |
142 | ||
143 | schedule_timeout(timeout); | |
144 | ||
145 | remove_wait_queue(&hdev->req_wait_q, &wait); | |
146 | ||
147 | if (signal_pending(current)) | |
148 | return ERR_PTR(-EINTR); | |
149 | ||
150 | switch (hdev->req_status) { | |
151 | case HCI_REQ_DONE: | |
152 | err = -bt_to_errno(hdev->req_result); | |
153 | break; | |
154 | ||
155 | case HCI_REQ_CANCELED: | |
156 | err = -hdev->req_result; | |
157 | break; | |
158 | ||
159 | default: | |
160 | err = -ETIMEDOUT; | |
161 | break; | |
162 | } | |
163 | ||
164 | hdev->req_status = hdev->req_result = 0; | |
165 | skb = hdev->req_skb; | |
166 | hdev->req_skb = NULL; | |
167 | ||
168 | BT_DBG("%s end: err %d", hdev->name, err); | |
169 | ||
170 | if (err < 0) { | |
171 | kfree_skb(skb); | |
172 | return ERR_PTR(err); | |
173 | } | |
174 | ||
175 | if (!skb) | |
176 | return ERR_PTR(-ENODATA); | |
177 | ||
178 | return skb; | |
179 | } | |
180 | EXPORT_SYMBOL(__hci_cmd_sync_ev); | |
181 | ||
182 | struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, | |
183 | const void *param, u32 timeout) | |
184 | { | |
185 | return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout); | |
186 | } | |
187 | EXPORT_SYMBOL(__hci_cmd_sync); | |
188 | ||
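__hci_cmd_sync_ev() and __hci_cmd_sync() wrap the request machinery into a blocking call that hands back the command-complete skb (or an ERR_PTR). A sketch of how a caller might issue a single command this way, assuming the HCI_OP_READ_LOCAL_VERSION opcode constant from hci.h and that the caller already holds the request-sync lock taken by hci_req_sync(); the function name is made up:

```c
static int example_read_local_version(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
                             HCI_CMD_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* reply parameters (struct hci_rp_read_local_version) start at
         * skb->data; the caller owns the reference returned here.
         */
        kfree_skb(skb);
        return 0;
}
```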
189 | /* Execute request and wait for completion. */ | |
a1d01db1 JH |
190 | int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, |
191 | unsigned long opt), | |
4ebeee2d | 192 | unsigned long opt, u32 timeout, u8 *hci_status) |
be91cd05 JH |
193 | { |
194 | struct hci_request req; | |
195 | DECLARE_WAITQUEUE(wait, current); | |
196 | int err = 0; | |
197 | ||
198 | BT_DBG("%s start", hdev->name); | |
199 | ||
200 | hci_req_init(&req, hdev); | |
201 | ||
202 | hdev->req_status = HCI_REQ_PEND; | |
203 | ||
a1d01db1 JH |
204 | err = func(&req, opt); |
205 | if (err) { | |
206 | if (hci_status) | |
207 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
208 | return err; | |
209 | } | |
be91cd05 JH |
210 | |
211 | add_wait_queue(&hdev->req_wait_q, &wait); | |
212 | set_current_state(TASK_INTERRUPTIBLE); | |
213 | ||
214 | err = hci_req_run_skb(&req, hci_req_sync_complete); | |
215 | if (err < 0) { | |
216 | hdev->req_status = 0; | |
217 | ||
218 | remove_wait_queue(&hdev->req_wait_q, &wait); | |
219 | set_current_state(TASK_RUNNING); | |
220 | ||
221 | /* ENODATA means the HCI request command queue is empty. | |
222 | * This can happen when a request with conditionals doesn't | |
223 | * trigger any commands to be sent. This is normal behavior | |
224 | * and should not trigger an error return. | |
225 | */ | |
568f44f6 JH |
226 | if (err == -ENODATA) { |
227 | if (hci_status) | |
228 | *hci_status = 0; | |
be91cd05 | 229 | return 0; |
568f44f6 JH |
230 | } |
231 | ||
232 | if (hci_status) | |
233 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
234 | |
235 | return err; | |
236 | } | |
237 | ||
238 | schedule_timeout(timeout); | |
239 | ||
240 | remove_wait_queue(&hdev->req_wait_q, &wait); | |
241 | ||
242 | if (signal_pending(current)) | |
243 | return -EINTR; | |
244 | ||
245 | switch (hdev->req_status) { | |
246 | case HCI_REQ_DONE: | |
247 | err = -bt_to_errno(hdev->req_result); | |
4ebeee2d JH |
248 | if (hci_status) |
249 | *hci_status = hdev->req_result; | |
be91cd05 JH |
250 | break; |
251 | ||
252 | case HCI_REQ_CANCELED: | |
253 | err = -hdev->req_result; | |
4ebeee2d JH |
254 | if (hci_status) |
255 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
256 | break; |
257 | ||
258 | default: | |
259 | err = -ETIMEDOUT; | |
4ebeee2d JH |
260 | if (hci_status) |
261 | *hci_status = HCI_ERROR_UNSPECIFIED; | |
be91cd05 JH |
262 | break; |
263 | } | |
264 | ||
265 | hdev->req_status = hdev->req_result = 0; | |
266 | ||
267 | BT_DBG("%s end: err %d", hdev->name, err); | |
268 | ||
269 | return err; | |
270 | } | |
271 | ||
a1d01db1 JH |
272 | int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, |
273 | unsigned long opt), | |
4ebeee2d | 274 | unsigned long opt, u32 timeout, u8 *hci_status) |
be91cd05 JH |
275 | { |
276 | int ret; | |
277 | ||
278 | if (!test_bit(HCI_UP, &hdev->flags)) | |
279 | return -ENETDOWN; | |
280 | ||
281 | /* Serialize all requests */ | |
b504430c | 282 | hci_req_sync_lock(hdev); |
4ebeee2d | 283 | ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); |
b504430c | 284 | hci_req_sync_unlock(hdev); |
be91cd05 JH |
285 | |
286 | return ret; | |
287 | } | |
288 | ||
0857dd3b JH |
289 | struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, |
290 | const void *param) | |
291 | { | |
292 | int len = HCI_COMMAND_HDR_SIZE + plen; | |
293 | struct hci_command_hdr *hdr; | |
294 | struct sk_buff *skb; | |
295 | ||
296 | skb = bt_skb_alloc(len, GFP_ATOMIC); | |
297 | if (!skb) | |
298 | return NULL; | |
299 | ||
300 | hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE); | |
301 | hdr->opcode = cpu_to_le16(opcode); | |
302 | hdr->plen = plen; | |
303 | ||
304 | if (plen) | |
305 | memcpy(skb_put(skb, plen), param, plen); | |
306 | ||
307 | BT_DBG("skb len %d", skb->len); | |
308 | ||
d79f34e3 MH |
309 | hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; |
310 | hci_skb_opcode(skb) = opcode; | |
0857dd3b JH |
311 | |
312 | return skb; | |
313 | } | |
314 | ||
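hci_prepare_cmd() writes a 3-byte HCI command header in front of the parameters: a little-endian 16-bit opcode (OGF in the upper 6 bits, OCF in the lower 10) followed by a one-byte parameter length. A standalone sketch of that packing for the Write Scan Enable command; the EXAMPLE_* macros are assumptions spelling out the spec-defined OGF/OCF split, not kernel constants:

```c
#include <stdio.h>

#define EXAMPLE_OGF_HOST_CTL    0x03
#define EXAMPLE_OCF_WRITE_SCAN  0x001a

int main(void)
{
        unsigned short opcode = EXAMPLE_OGF_HOST_CTL << 10 | EXAMPLE_OCF_WRITE_SCAN;
        unsigned char hdr[3];

        hdr[0] = opcode & 0xff;         /* cpu_to_le16(opcode): low byte first */
        hdr[1] = opcode >> 8;
        hdr[2] = 1;                     /* plen: one parameter byte follows */

        printf("opcode 0x%04x -> %02x %02x, plen %u\n",
               opcode, hdr[0], hdr[1], hdr[2]);
        /* prints: opcode 0x0c1a -> 1a 0c, plen 1 */
        return 0;
}
```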
315 | /* Queue a command to an asynchronous HCI request */ | |
316 | void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, | |
317 | const void *param, u8 event) | |
318 | { | |
319 | struct hci_dev *hdev = req->hdev; | |
320 | struct sk_buff *skb; | |
321 | ||
322 | BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); | |
323 | ||
324 | /* If an error occurred during request building, there is no point in | |
325 | * queueing the HCI command. We can simply return. | |
326 | */ | |
327 | if (req->err) | |
328 | return; | |
329 | ||
330 | skb = hci_prepare_cmd(hdev, opcode, plen, param); | |
331 | if (!skb) { | |
332 | BT_ERR("%s no memory for command (opcode 0x%4.4x)", | |
333 | hdev->name, opcode); | |
334 | req->err = -ENOMEM; | |
335 | return; | |
336 | } | |
337 | ||
338 | if (skb_queue_empty(&req->cmd_q)) | |
44d27137 | 339 | bt_cb(skb)->hci.req_flags |= HCI_REQ_START; |
0857dd3b | 340 | |
242c0ebd | 341 | bt_cb(skb)->hci.req_event = event; |
0857dd3b JH |
342 | |
343 | skb_queue_tail(&req->cmd_q, skb); | |
344 | } | |
345 | ||
346 | void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, | |
347 | const void *param) | |
348 | { | |
349 | hci_req_add_ev(req, opcode, plen, param, 0); | |
350 | } | |
351 | ||
bf943cbf JH |
352 | void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) |
353 | { | |
354 | struct hci_dev *hdev = req->hdev; | |
355 | struct hci_cp_write_page_scan_activity acp; | |
356 | u8 type; | |
357 | ||
358 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | |
359 | return; | |
360 | ||
361 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) | |
362 | return; | |
363 | ||
364 | if (enable) { | |
365 | type = PAGE_SCAN_TYPE_INTERLACED; | |
366 | ||
367 | /* 160 msec page scan interval */ | |
368 | acp.interval = cpu_to_le16(0x0100); | |
369 | } else { | |
370 | type = PAGE_SCAN_TYPE_STANDARD; /* default */ | |
371 | ||
372 | /* default 1.28 sec page scan */ | |
373 | acp.interval = cpu_to_le16(0x0800); | |
374 | } | |
375 | ||
376 | acp.window = cpu_to_le16(0x0012); | |
377 | ||
378 | if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || | |
379 | __cpu_to_le16(hdev->page_scan_window) != acp.window) | |
380 | hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, | |
381 | sizeof(acp), &acp); | |
382 | ||
383 | if (hdev->page_scan_type != type) | |
384 | hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); | |
385 | } | |
386 | ||
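The page scan activity values above are expressed in 0.625 ms baseband slots, so 0x0100 is 160 ms, 0x0800 is 1.28 s and the 0x0012 window is 11.25 ms. A standalone check of that arithmetic:

```c
#include <stdio.h>

int main(void)
{
        /* Page scan interval and window are in 0.625 ms slots */
        printf("interval 0x0100 = %.2f ms\n", 0x0100 * 0.625); /* 160.00 */
        printf("interval 0x0800 = %.2f ms\n", 0x0800 * 0.625); /* 1280.00 */
        printf("window   0x0012 = %.3f ms\n", 0x0012 * 0.625); /* 11.250 */
        return 0;
}
```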
196a5e97 JH |
387 | /* This function controls the background scanning based on hdev->pend_le_conns |
388 | * list. If there are pending LE connections we start the background scanning, | |
389 | * otherwise we stop it. | |
390 | * | |
391 | * This function requires the caller to hold hdev->lock. | |
392 | */ | |
393 | static void __hci_update_background_scan(struct hci_request *req) | |
394 | { | |
395 | struct hci_dev *hdev = req->hdev; | |
396 | ||
397 | if (!test_bit(HCI_UP, &hdev->flags) || | |
398 | test_bit(HCI_INIT, &hdev->flags) || | |
399 | hci_dev_test_flag(hdev, HCI_SETUP) || | |
400 | hci_dev_test_flag(hdev, HCI_CONFIG) || | |
401 | hci_dev_test_flag(hdev, HCI_AUTO_OFF) || | |
402 | hci_dev_test_flag(hdev, HCI_UNREGISTER)) | |
403 | return; | |
404 | ||
405 | /* No point in doing scanning if LE support hasn't been enabled */ | |
406 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) | |
407 | return; | |
408 | ||
409 | /* If discovery is active don't interfere with it */ | |
410 | if (hdev->discovery.state != DISCOVERY_STOPPED) | |
411 | return; | |
412 | ||
413 | /* Reset RSSI and UUID filters when starting background scanning | |
414 | * since these filters are meant for service discovery only. | |
415 | * | |
416 | * The Start Discovery and Start Service Discovery operations | |
417 | * ensure to set proper values for RSSI threshold and UUID | |
418 | * filter list. So it is safe to just reset them here. | |
419 | */ | |
420 | hci_discovery_filter_clear(hdev); | |
421 | ||
422 | if (list_empty(&hdev->pend_le_conns) && | |
423 | list_empty(&hdev->pend_le_reports)) { | |
424 | /* If there are no pending LE connections or devices | |
425 | * to be scanned for, we should stop the background | |
426 | * scanning. | |
427 | */ | |
428 | ||
429 | /* If controller is not scanning we are done. */ | |
430 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | |
431 | return; | |
432 | ||
433 | hci_req_add_le_scan_disable(req); | |
434 | ||
435 | BT_DBG("%s stopping background scanning", hdev->name); | |
436 | } else { | |
437 | /* If there is at least one pending LE connection, we should | |
438 | * keep the background scan running. | |
439 | */ | |
440 | ||
441 | /* If controller is connecting, we should not start scanning | |
442 | * since some controllers are not able to scan and connect at | |
443 | * the same time. | |
444 | */ | |
445 | if (hci_lookup_le_connect(hdev)) | |
446 | return; | |
447 | ||
448 | /* If controller is currently scanning, we stop it to ensure we | |
449 | * don't miss any advertising (due to duplicates filter). | |
450 | */ | |
451 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) | |
452 | hci_req_add_le_scan_disable(req); | |
453 | ||
454 | hci_req_add_le_passive_scan(req); | |
455 | ||
456 | BT_DBG("%s starting background scanning", hdev->name); | |
457 | } | |
458 | } | |
459 | ||
00cf5040 JH |
460 | void __hci_req_update_name(struct hci_request *req) |
461 | { | |
462 | struct hci_dev *hdev = req->hdev; | |
463 | struct hci_cp_write_local_name cp; | |
464 | ||
465 | memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); | |
466 | ||
467 | hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); | |
468 | } | |
469 | ||
b1a8917c JH |
470 | #define PNP_INFO_SVCLASS_ID 0x1200 |
471 | ||
472 | static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) | |
473 | { | |
474 | u8 *ptr = data, *uuids_start = NULL; | |
475 | struct bt_uuid *uuid; | |
476 | ||
477 | if (len < 4) | |
478 | return ptr; | |
479 | ||
480 | list_for_each_entry(uuid, &hdev->uuids, list) { | |
481 | u16 uuid16; | |
482 | ||
483 | if (uuid->size != 16) | |
484 | continue; | |
485 | ||
486 | uuid16 = get_unaligned_le16(&uuid->uuid[12]); | |
487 | if (uuid16 < 0x1100) | |
488 | continue; | |
489 | ||
490 | if (uuid16 == PNP_INFO_SVCLASS_ID) | |
491 | continue; | |
492 | ||
493 | if (!uuids_start) { | |
494 | uuids_start = ptr; | |
495 | uuids_start[0] = 1; | |
496 | uuids_start[1] = EIR_UUID16_ALL; | |
497 | ptr += 2; | |
498 | } | |
499 | ||
500 | /* Stop if not enough space to put next UUID */ | |
501 | if ((ptr - data) + sizeof(u16) > len) { | |
502 | uuids_start[1] = EIR_UUID16_SOME; | |
503 | break; | |
504 | } | |
505 | ||
506 | *ptr++ = (uuid16 & 0x00ff); | |
507 | *ptr++ = (uuid16 & 0xff00) >> 8; | |
508 | uuids_start[0] += sizeof(uuid16); | |
509 | } | |
510 | ||
511 | return ptr; | |
512 | } | |
513 | ||
514 | static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) | |
515 | { | |
516 | u8 *ptr = data, *uuids_start = NULL; | |
517 | struct bt_uuid *uuid; | |
518 | ||
519 | if (len < 6) | |
520 | return ptr; | |
521 | ||
522 | list_for_each_entry(uuid, &hdev->uuids, list) { | |
523 | if (uuid->size != 32) | |
524 | continue; | |
525 | ||
526 | if (!uuids_start) { | |
527 | uuids_start = ptr; | |
528 | uuids_start[0] = 1; | |
529 | uuids_start[1] = EIR_UUID32_ALL; | |
530 | ptr += 2; | |
531 | } | |
532 | ||
533 | /* Stop if not enough space to put next UUID */ | |
534 | if ((ptr - data) + sizeof(u32) > len) { | |
535 | uuids_start[1] = EIR_UUID32_SOME; | |
536 | break; | |
537 | } | |
538 | ||
539 | memcpy(ptr, &uuid->uuid[12], sizeof(u32)); | |
540 | ptr += sizeof(u32); | |
541 | uuids_start[0] += sizeof(u32); | |
542 | } | |
543 | ||
544 | return ptr; | |
545 | } | |
546 | ||
547 | static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) | |
548 | { | |
549 | u8 *ptr = data, *uuids_start = NULL; | |
550 | struct bt_uuid *uuid; | |
551 | ||
552 | if (len < 18) | |
553 | return ptr; | |
554 | ||
555 | list_for_each_entry(uuid, &hdev->uuids, list) { | |
556 | if (uuid->size != 128) | |
557 | continue; | |
558 | ||
559 | if (!uuids_start) { | |
560 | uuids_start = ptr; | |
561 | uuids_start[0] = 1; | |
562 | uuids_start[1] = EIR_UUID128_ALL; | |
563 | ptr += 2; | |
564 | } | |
565 | ||
566 | /* Stop if not enough space to put next UUID */ | |
567 | if ((ptr - data) + 16 > len) { | |
568 | uuids_start[1] = EIR_UUID128_SOME; | |
569 | break; | |
570 | } | |
571 | ||
572 | memcpy(ptr, uuid->uuid, 16); | |
573 | ptr += 16; | |
574 | uuids_start[0] += 16; | |
575 | } | |
576 | ||
577 | return ptr; | |
578 | } | |
579 | ||
580 | static void create_eir(struct hci_dev *hdev, u8 *data) | |
581 | { | |
582 | u8 *ptr = data; | |
583 | size_t name_len; | |
584 | ||
585 | name_len = strlen(hdev->dev_name); | |
586 | ||
587 | if (name_len > 0) { | |
588 | /* EIR Data type */ | |
589 | if (name_len > 48) { | |
590 | name_len = 48; | |
591 | ptr[1] = EIR_NAME_SHORT; | |
592 | } else | |
593 | ptr[1] = EIR_NAME_COMPLETE; | |
594 | ||
595 | /* EIR Data length */ | |
596 | ptr[0] = name_len + 1; | |
597 | ||
598 | memcpy(ptr + 2, hdev->dev_name, name_len); | |
599 | ||
600 | ptr += (name_len + 2); | |
601 | } | |
602 | ||
603 | if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { | |
604 | ptr[0] = 2; | |
605 | ptr[1] = EIR_TX_POWER; | |
606 | ptr[2] = (u8) hdev->inq_tx_power; | |
607 | ||
608 | ptr += 3; | |
609 | } | |
610 | ||
611 | if (hdev->devid_source > 0) { | |
612 | ptr[0] = 9; | |
613 | ptr[1] = EIR_DEVICE_ID; | |
614 | ||
615 | put_unaligned_le16(hdev->devid_source, ptr + 2); | |
616 | put_unaligned_le16(hdev->devid_vendor, ptr + 4); | |
617 | put_unaligned_le16(hdev->devid_product, ptr + 6); | |
618 | put_unaligned_le16(hdev->devid_version, ptr + 8); | |
619 | ||
620 | ptr += 10; | |
621 | } | |
622 | ||
623 | ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); | |
624 | ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); | |
625 | ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); | |
626 | } | |
627 | ||
628 | void __hci_req_update_eir(struct hci_request *req) | |
629 | { | |
630 | struct hci_dev *hdev = req->hdev; | |
631 | struct hci_cp_write_eir cp; | |
632 | ||
633 | if (!hdev_is_powered(hdev)) | |
634 | return; | |
635 | ||
636 | if (!lmp_ext_inq_capable(hdev)) | |
637 | return; | |
638 | ||
639 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) | |
640 | return; | |
641 | ||
642 | if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) | |
643 | return; | |
644 | ||
645 | memset(&cp, 0, sizeof(cp)); | |
646 | ||
647 | create_eir(hdev, cp.data); | |
648 | ||
649 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) | |
650 | return; | |
651 | ||
652 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); | |
653 | ||
654 | hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); | |
655 | } | |
656 | ||
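Every field emitted by create_eir() and the UUID list helpers follows the same length/type/value layout: one length octet that counts the type octet plus the payload, then the AD type (EIR_NAME_COMPLETE, EIR_UUID16_ALL, ...), then the data. A small standalone illustration of that encoding; the 0x09 value for the complete-name type is assumed from the Bluetooth assigned numbers rather than taken from this file:

```c
#include <stdio.h>
#include <string.h>

#define EXAMPLE_EIR_NAME_COMPLETE 0x09  /* assumed AD type value */

int main(void)
{
        unsigned char eir[240] = { 0 };
        const char *name = "BlueZ";
        size_t name_len = strlen(name);

        eir[0] = name_len + 1;                  /* length: type octet + payload */
        eir[1] = EXAMPLE_EIR_NAME_COMPLETE;     /* AD type */
        memcpy(&eir[2], name, name_len);

        printf("field length octet: %d\n", (int)eir[0]);       /* 6 */
        return 0;
}
```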
0857dd3b JH |
657 | void hci_req_add_le_scan_disable(struct hci_request *req) |
658 | { | |
659 | struct hci_cp_le_set_scan_enable cp; | |
660 | ||
661 | memset(&cp, 0, sizeof(cp)); | |
662 | cp.enable = LE_SCAN_DISABLE; | |
663 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | |
664 | } | |
665 | ||
666 | static void add_to_white_list(struct hci_request *req, | |
667 | struct hci_conn_params *params) | |
668 | { | |
669 | struct hci_cp_le_add_to_white_list cp; | |
670 | ||
671 | cp.bdaddr_type = params->addr_type; | |
672 | bacpy(&cp.bdaddr, ¶ms->addr); | |
673 | ||
674 | hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); | |
675 | } | |
676 | ||
677 | static u8 update_white_list(struct hci_request *req) | |
678 | { | |
679 | struct hci_dev *hdev = req->hdev; | |
680 | struct hci_conn_params *params; | |
681 | struct bdaddr_list *b; | |
682 | uint8_t white_list_entries = 0; | |
683 | ||
684 | /* Go through the current white list programmed into the | |
685 | * controller one by one and check if that address is still | |
686 | * in the list of pending connections or list of devices to | |
687 | * report. If not present in either list, then queue the | |
688 | * command to remove it from the controller. | |
689 | */ | |
690 | list_for_each_entry(b, &hdev->le_white_list, list) { | |
691 | struct hci_cp_le_del_from_white_list cp; | |
692 | ||
693 | if (hci_pend_le_action_lookup(&hdev->pend_le_conns, | |
694 | &b->bdaddr, b->bdaddr_type) || | |
695 | hci_pend_le_action_lookup(&hdev->pend_le_reports, | |
696 | &b->bdaddr, b->bdaddr_type)) { | |
697 | white_list_entries++; | |
698 | continue; | |
699 | } | |
700 | ||
701 | cp.bdaddr_type = b->bdaddr_type; | |
702 | bacpy(&cp.bdaddr, &b->bdaddr); | |
703 | ||
704 | hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, | |
705 | sizeof(cp), &cp); | |
706 | } | |
707 | ||
708 | /* Since all white list entries that are no longer valid have | |
709 | * been removed, walk through the list of pending connections | |
710 | * and ensure that any new device gets programmed into | |
711 | * the controller. | |
712 | * | |
713 | * If the list of devices is larger than the list of | |
714 | * available white list entries in the controller, then | |
715 | * just abort and return a filter policy value that does not | |
716 | * use the white list. | |
717 | */ | |
718 | list_for_each_entry(params, &hdev->pend_le_conns, action) { | |
719 | if (hci_bdaddr_list_lookup(&hdev->le_white_list, | |
720 | ¶ms->addr, params->addr_type)) | |
721 | continue; | |
722 | ||
723 | if (white_list_entries >= hdev->le_white_list_size) { | |
724 | /* Select filter policy to accept all advertising */ | |
725 | return 0x00; | |
726 | } | |
727 | ||
728 | if (hci_find_irk_by_addr(hdev, ¶ms->addr, | |
729 | params->addr_type)) { | |
730 | /* White list can not be used with RPAs */ | |
731 | return 0x00; | |
732 | } | |
733 | ||
734 | white_list_entries++; | |
735 | add_to_white_list(req, params); | |
736 | } | |
737 | ||
738 | /* After adding all new pending connections, walk through | |
739 | * the list of pending reports and also add these to the | |
740 | * white list if there is still space. | |
741 | */ | |
742 | list_for_each_entry(params, &hdev->pend_le_reports, action) { | |
743 | if (hci_bdaddr_list_lookup(&hdev->le_white_list, | |
744 | ¶ms->addr, params->addr_type)) | |
745 | continue; | |
746 | ||
747 | if (white_list_entries >= hdev->le_white_list_size) { | |
748 | /* Select filter policy to accept all advertising */ | |
749 | return 0x00; | |
750 | } | |
751 | ||
752 | if (hci_find_irk_by_addr(hdev, ¶ms->addr, | |
753 | params->addr_type)) { | |
754 | /* White list can not be used with RPAs */ | |
755 | return 0x00; | |
756 | } | |
757 | ||
758 | white_list_entries++; | |
759 | add_to_white_list(req, params); | |
760 | } | |
761 | ||
762 | /* Select filter policy to use white list */ | |
763 | return 0x01; | |
764 | } | |
765 | ||
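update_white_list() returns the scanning filter policy that hci_req_add_le_passive_scan() will program: 0x00 accepts all advertising, 0x01 restricts it to the white list, and the extended-policy bit 0x02 added below folds in directed advertising from RPAs. A sketch of those values as an enum, with illustrative names; the numeric values follow the comments in this file:

```c
enum example_scan_filter_policy {
        EXAMPLE_FILTER_ACCEPT_ALL       = 0x00,
        EXAMPLE_FILTER_WHITELIST        = 0x01,
        EXAMPLE_FILTER_ACCEPT_ALL_EXT   = 0x02, /* + directed adv with RPA */
        EXAMPLE_FILTER_WHITELIST_EXT    = 0x03,
};
```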
766 | void hci_req_add_le_passive_scan(struct hci_request *req) | |
767 | { | |
768 | struct hci_cp_le_set_scan_param param_cp; | |
769 | struct hci_cp_le_set_scan_enable enable_cp; | |
770 | struct hci_dev *hdev = req->hdev; | |
771 | u8 own_addr_type; | |
772 | u8 filter_policy; | |
773 | ||
774 | /* Set require_privacy to false since no SCAN_REQ is sent | |
775 | * during passive scanning. Not using a non-resolvable address | |
776 | * here is important so that peer devices using direct | |
777 | * advertising with our address will be correctly reported | |
778 | * by the controller. | |
779 | */ | |
780 | if (hci_update_random_address(req, false, &own_addr_type)) | |
781 | return; | |
782 | ||
783 | /* Adding or removing entries from the white list must | |
784 | * happen before enabling scanning. The controller does | |
785 | * not allow white list modification while scanning. | |
786 | */ | |
787 | filter_policy = update_white_list(req); | |
788 | ||
789 | /* When the controller is using random resolvable addresses and | |
790 | * with that having LE privacy enabled, then controllers with | |
791 | * Extended Scanner Filter Policies support can now enable support | |
792 | * for handling directed advertising. | |
793 | * | |
794 | * So instead of using filter policies 0x00 (no whitelist) | |
795 | * and 0x01 (whitelist enabled) use the new filter policies | |
796 | * 0x02 (no whitelist) and 0x03 (whitelist enabled). | |
797 | */ | |
d7a5a11d | 798 | if (hci_dev_test_flag(hdev, HCI_PRIVACY) && |
0857dd3b JH |
799 | (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) |
800 | filter_policy |= 0x02; | |
801 | ||
802 | memset(¶m_cp, 0, sizeof(param_cp)); | |
803 | param_cp.type = LE_SCAN_PASSIVE; | |
804 | param_cp.interval = cpu_to_le16(hdev->le_scan_interval); | |
805 | param_cp.window = cpu_to_le16(hdev->le_scan_window); | |
806 | param_cp.own_address_type = own_addr_type; | |
807 | param_cp.filter_policy = filter_policy; | |
808 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), | |
809 | ¶m_cp); | |
810 | ||
811 | memset(&enable_cp, 0, sizeof(enable_cp)); | |
812 | enable_cp.enable = LE_SCAN_ENABLE; | |
813 | enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | |
814 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), | |
815 | &enable_cp); | |
816 | } | |
817 | ||
f2252570 JH |
818 | static u8 get_current_adv_instance(struct hci_dev *hdev) |
819 | { | |
820 | /* The "Set Advertising" setting supersedes the "Add Advertising" | |
821 | * setting. Here we set the advertising data based on which | |
822 | * setting was set. When neither apply, default to the global settings, | |
823 | * represented by instance "0". | |
824 | */ | |
825 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) && | |
826 | !hci_dev_test_flag(hdev, HCI_ADVERTISING)) | |
827 | return hdev->cur_adv_instance; | |
828 | ||
829 | return 0x00; | |
830 | } | |
831 | ||
832 | static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) | |
833 | { | |
834 | u8 instance = get_current_adv_instance(hdev); | |
835 | struct adv_info *adv_instance; | |
836 | ||
837 | /* Ignore instance 0 */ | |
838 | if (instance == 0x00) | |
839 | return 0; | |
840 | ||
841 | adv_instance = hci_find_adv_instance(hdev, instance); | |
842 | if (!adv_instance) | |
843 | return 0; | |
844 | ||
845 | /* TODO: Take into account the "appearance" and "local-name" flags here. | |
846 | * These are currently being ignored as they are not supported. | |
847 | */ | |
848 | return adv_instance->scan_rsp_len; | |
849 | } | |
850 | ||
851 | void __hci_req_disable_advertising(struct hci_request *req) | |
852 | { | |
853 | u8 enable = 0x00; | |
854 | ||
855 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); | |
856 | } | |
857 | ||
858 | static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance) | |
859 | { | |
860 | u32 flags; | |
861 | struct adv_info *adv_instance; | |
862 | ||
863 | if (instance == 0x00) { | |
864 | /* Instance 0 always manages the "Tx Power" and "Flags" | |
865 | * fields | |
866 | */ | |
867 | flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; | |
868 | ||
869 | /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting | |
870 | * corresponds to the "connectable" instance flag. | |
871 | */ | |
872 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) | |
873 | flags |= MGMT_ADV_FLAG_CONNECTABLE; | |
874 | ||
875 | return flags; | |
876 | } | |
877 | ||
878 | adv_instance = hci_find_adv_instance(hdev, instance); | |
879 | ||
880 | /* Return 0 when we got an invalid instance identifier. */ | |
881 | if (!adv_instance) | |
882 | return 0; | |
883 | ||
884 | return adv_instance->flags; | |
885 | } | |
886 | ||
887 | void __hci_req_enable_advertising(struct hci_request *req) | |
888 | { | |
889 | struct hci_dev *hdev = req->hdev; | |
890 | struct hci_cp_le_set_adv_param cp; | |
891 | u8 own_addr_type, enable = 0x01; | |
892 | bool connectable; | |
893 | u8 instance; | |
894 | u32 flags; | |
895 | ||
896 | if (hci_conn_num(hdev, LE_LINK) > 0) | |
897 | return; | |
898 | ||
899 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) | |
900 | __hci_req_disable_advertising(req); | |
901 | ||
902 | /* Clear the HCI_LE_ADV bit temporarily so that the | |
903 | * hci_update_random_address knows that it's safe to go ahead | |
904 | * and write a new random address. The flag will be set back on | |
905 | * as soon as the SET_ADV_ENABLE HCI command completes. | |
906 | */ | |
907 | hci_dev_clear_flag(hdev, HCI_LE_ADV); | |
908 | ||
909 | instance = get_current_adv_instance(hdev); | |
910 | flags = get_adv_instance_flags(hdev, instance); | |
911 | ||
912 | /* If the "connectable" instance flag was not set, then choose between | |
913 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. | |
914 | */ | |
915 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || | |
916 | mgmt_get_connectable(hdev); | |
917 | ||
918 | /* Set require_privacy to true only when non-connectable | |
919 | * advertising is used. In that case it is fine to use a | |
920 | * non-resolvable private address. | |
921 | */ | |
922 | if (hci_update_random_address(req, !connectable, &own_addr_type) < 0) | |
923 | return; | |
924 | ||
925 | memset(&cp, 0, sizeof(cp)); | |
926 | cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval); | |
927 | cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval); | |
928 | ||
929 | if (connectable) | |
930 | cp.type = LE_ADV_IND; | |
931 | else if (get_cur_adv_instance_scan_rsp_len(hdev)) | |
932 | cp.type = LE_ADV_SCAN_IND; | |
933 | else | |
934 | cp.type = LE_ADV_NONCONN_IND; | |
935 | ||
936 | cp.own_address_type = own_addr_type; | |
937 | cp.channel_map = hdev->le_adv_channel_map; | |
938 | ||
939 | hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); | |
940 | ||
941 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); | |
942 | } | |
943 | ||
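The advertising PDU type selected above follows a simple precedence: a connectable instance always uses ADV_IND, a non-connectable one with scan response data uses ADV_SCAN_IND, and otherwise ADV_NONCONN_IND is chosen. A compact standalone restatement of that decision; the numeric values are the spec-defined advertising types assumed to match the LE_ADV_* constants used above:

```c
#include <stdio.h>

enum { EX_ADV_IND = 0x00, EX_ADV_SCAN_IND = 0x02, EX_ADV_NONCONN_IND = 0x03 };

static int pick_adv_type(int connectable, int scan_rsp_len)
{
        if (connectable)
                return EX_ADV_IND;
        if (scan_rsp_len)
                return EX_ADV_SCAN_IND;
        return EX_ADV_NONCONN_IND;
}

int main(void)
{
        printf("%d %d %d\n", pick_adv_type(1, 0), pick_adv_type(0, 5),
               pick_adv_type(0, 0));   /* prints: 0 2 3 */
        return 0;
}
```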
944 | static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr) | |
945 | { | |
946 | u8 ad_len = 0; | |
947 | size_t name_len; | |
948 | ||
949 | name_len = strlen(hdev->dev_name); | |
950 | if (name_len > 0) { | |
951 | size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2; | |
952 | ||
953 | if (name_len > max_len) { | |
954 | name_len = max_len; | |
955 | ptr[1] = EIR_NAME_SHORT; | |
956 | } else | |
957 | ptr[1] = EIR_NAME_COMPLETE; | |
958 | ||
959 | ptr[0] = name_len + 1; | |
960 | ||
961 | memcpy(ptr + 2, hdev->dev_name, name_len); | |
962 | ||
963 | ad_len += (name_len + 2); | |
964 | ptr += (name_len + 2); | |
965 | } | |
966 | ||
967 | return ad_len; | |
968 | } | |
969 | ||
970 | static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance, | |
971 | u8 *ptr) | |
972 | { | |
973 | struct adv_info *adv_instance; | |
974 | ||
975 | adv_instance = hci_find_adv_instance(hdev, instance); | |
976 | if (!adv_instance) | |
977 | return 0; | |
978 | ||
979 | /* TODO: Set the appropriate entries based on advertising instance flags | |
980 | * here once flags other than 0 are supported. | |
981 | */ | |
982 | memcpy(ptr, adv_instance->scan_rsp_data, | |
983 | adv_instance->scan_rsp_len); | |
984 | ||
985 | return adv_instance->scan_rsp_len; | |
986 | } | |
987 | ||
988 | static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance) | |
989 | { | |
990 | struct hci_dev *hdev = req->hdev; | |
991 | struct hci_cp_le_set_scan_rsp_data cp; | |
992 | u8 len; | |
993 | ||
994 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) | |
995 | return; | |
996 | ||
997 | memset(&cp, 0, sizeof(cp)); | |
998 | ||
999 | if (instance) | |
1000 | len = create_instance_scan_rsp_data(hdev, instance, cp.data); | |
1001 | else | |
1002 | len = create_default_scan_rsp_data(hdev, cp.data); | |
1003 | ||
1004 | if (hdev->scan_rsp_data_len == len && | |
1005 | !memcmp(cp.data, hdev->scan_rsp_data, len)) | |
1006 | return; | |
1007 | ||
1008 | memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); | |
1009 | hdev->scan_rsp_data_len = len; | |
1010 | ||
1011 | cp.length = len; | |
1012 | ||
1013 | hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); | |
1014 | } | |
1015 | ||
1016 | void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance) | |
1017 | { | |
1018 | if (instance == HCI_ADV_CURRENT) | |
1019 | instance = get_current_adv_instance(req->hdev); | |
1020 | ||
1021 | update_inst_scan_rsp_data(req, get_current_adv_instance(req->hdev)); | |
1022 | } | |
1023 | ||
1024 | static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) | |
1025 | { | |
1026 | struct adv_info *adv_instance = NULL; | |
1027 | u8 ad_len = 0, flags = 0; | |
1028 | u32 instance_flags; | |
1029 | ||
1030 | /* Return 0 when the current instance identifier is invalid. */ | |
1031 | if (instance) { | |
1032 | adv_instance = hci_find_adv_instance(hdev, instance); | |
1033 | if (!adv_instance) | |
1034 | return 0; | |
1035 | } | |
1036 | ||
1037 | instance_flags = get_adv_instance_flags(hdev, instance); | |
1038 | ||
1039 | /* The Add Advertising command allows userspace to set both the general | |
1040 | * and limited discoverable flags. | |
1041 | */ | |
1042 | if (instance_flags & MGMT_ADV_FLAG_DISCOV) | |
1043 | flags |= LE_AD_GENERAL; | |
1044 | ||
1045 | if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) | |
1046 | flags |= LE_AD_LIMITED; | |
1047 | ||
1048 | if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { | |
1049 | /* If a discovery flag wasn't provided, simply use the global | |
1050 | * settings. | |
1051 | */ | |
1052 | if (!flags) | |
1053 | flags |= mgmt_get_adv_discov_flags(hdev); | |
1054 | ||
1055 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | |
1056 | flags |= LE_AD_NO_BREDR; | |
1057 | ||
1058 | /* If flags would still be empty, then there is no need to | |
1059 | * include the "Flags" AD field. | |
1060 | */ | |
1061 | if (flags) { | |
1062 | ptr[0] = 0x02; | |
1063 | ptr[1] = EIR_FLAGS; | |
1064 | ptr[2] = flags; | |
1065 | ||
1066 | ad_len += 3; | |
1067 | ptr += 3; | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | if (adv_instance) { | |
1072 | memcpy(ptr, adv_instance->adv_data, | |
1073 | adv_instance->adv_data_len); | |
1074 | ad_len += adv_instance->adv_data_len; | |
1075 | ptr += adv_instance->adv_data_len; | |
1076 | } | |
1077 | ||
1078 | /* Provide Tx Power only if we can provide a valid value for it */ | |
1079 | if (hdev->adv_tx_power != HCI_TX_POWER_INVALID && | |
1080 | (instance_flags & MGMT_ADV_FLAG_TX_POWER)) { | |
1081 | ptr[0] = 0x02; | |
1082 | ptr[1] = EIR_TX_POWER; | |
1083 | ptr[2] = (u8)hdev->adv_tx_power; | |
1084 | ||
1085 | ad_len += 3; | |
1086 | ptr += 3; | |
1087 | } | |
1088 | ||
1089 | return ad_len; | |
1090 | } | |
1091 | ||
1092 | static void update_inst_adv_data(struct hci_request *req, u8 instance) | |
1093 | { | |
1094 | struct hci_dev *hdev = req->hdev; | |
1095 | struct hci_cp_le_set_adv_data cp; | |
1096 | u8 len; | |
1097 | ||
1098 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) | |
1099 | return; | |
1100 | ||
1101 | memset(&cp, 0, sizeof(cp)); | |
1102 | ||
1103 | len = create_instance_adv_data(hdev, instance, cp.data); | |
1104 | ||
1105 | /* There's nothing to do if the data hasn't changed */ | |
1106 | if (hdev->adv_data_len == len && | |
1107 | memcmp(cp.data, hdev->adv_data, len) == 0) | |
1108 | return; | |
1109 | ||
1110 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); | |
1111 | hdev->adv_data_len = len; | |
1112 | ||
1113 | cp.length = len; | |
1114 | ||
1115 | hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); | |
1116 | } | |
1117 | ||
1118 | void __hci_req_update_adv_data(struct hci_request *req, int instance) | |
1119 | { | |
1120 | if (instance == HCI_ADV_CURRENT) | |
1121 | instance = get_current_adv_instance(req->hdev); | |
1122 | ||
1123 | update_inst_adv_data(req, instance); | |
1124 | } | |
1125 | ||
1126 | int hci_req_update_adv_data(struct hci_dev *hdev, int instance) | |
1127 | { | |
1128 | struct hci_request req; | |
1129 | ||
1130 | hci_req_init(&req, hdev); | |
1131 | __hci_req_update_adv_data(&req, instance); | |
1132 | ||
1133 | return hci_req_run(&req, NULL); | |
1134 | } | |
1135 | ||
1136 | static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) | |
1137 | { | |
1138 | BT_DBG("%s status %u", hdev->name, status); | |
1139 | } | |
1140 | ||
1141 | void hci_req_reenable_advertising(struct hci_dev *hdev) | |
1142 | { | |
1143 | struct hci_request req; | |
1144 | u8 instance; | |
1145 | ||
1146 | if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && | |
1147 | !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) | |
1148 | return; | |
1149 | ||
1150 | instance = get_current_adv_instance(hdev); | |
1151 | ||
1152 | hci_req_init(&req, hdev); | |
1153 | ||
1154 | if (instance) { | |
1155 | __hci_req_schedule_adv_instance(&req, instance, true); | |
1156 | } else { | |
1157 | __hci_req_update_adv_data(&req, HCI_ADV_CURRENT); | |
1158 | __hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT); | |
1159 | __hci_req_enable_advertising(&req); | |
1160 | } | |
1161 | ||
1162 | hci_req_run(&req, adv_enable_complete); | |
1163 | } | |
1164 | ||
1165 | static void adv_timeout_expire(struct work_struct *work) | |
1166 | { | |
1167 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
1168 | adv_instance_expire.work); | |
1169 | ||
1170 | struct hci_request req; | |
1171 | u8 instance; | |
1172 | ||
1173 | BT_DBG("%s", hdev->name); | |
1174 | ||
1175 | hci_dev_lock(hdev); | |
1176 | ||
1177 | hdev->adv_instance_timeout = 0; | |
1178 | ||
1179 | instance = get_current_adv_instance(hdev); | |
1180 | if (instance == 0x00) | |
1181 | goto unlock; | |
1182 | ||
1183 | hci_req_init(&req, hdev); | |
1184 | ||
1185 | hci_req_clear_adv_instance(hdev, &req, instance, false); | |
1186 | ||
1187 | if (list_empty(&hdev->adv_instances)) | |
1188 | __hci_req_disable_advertising(&req); | |
1189 | ||
1190 | if (!skb_queue_empty(&req.cmd_q)) | |
1191 | hci_req_run(&req, NULL); | |
1192 | ||
1193 | unlock: | |
1194 | hci_dev_unlock(hdev); | |
1195 | } | |
1196 | ||
1197 | int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, | |
1198 | bool force) | |
1199 | { | |
1200 | struct hci_dev *hdev = req->hdev; | |
1201 | struct adv_info *adv_instance = NULL; | |
1202 | u16 timeout; | |
1203 | ||
1204 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | |
1205 | !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) | |
1206 | return -EPERM; | |
1207 | ||
1208 | if (hdev->adv_instance_timeout) | |
1209 | return -EBUSY; | |
1210 | ||
1211 | adv_instance = hci_find_adv_instance(hdev, instance); | |
1212 | if (!adv_instance) | |
1213 | return -ENOENT; | |
1214 | ||
1215 | /* A zero timeout means unlimited advertising. As long as there is | |
1216 | * only one instance, duration should be ignored. We still set a timeout | |
1217 | * in case further instances are being added later on. | |
1218 | * | |
1219 | * If the remaining lifetime of the instance is more than the duration | |
1220 | * then the timeout corresponds to the duration, otherwise it will be | |
1221 | * reduced to the remaining instance lifetime. | |
1222 | */ | |
1223 | if (adv_instance->timeout == 0 || | |
1224 | adv_instance->duration <= adv_instance->remaining_time) | |
1225 | timeout = adv_instance->duration; | |
1226 | else | |
1227 | timeout = adv_instance->remaining_time; | |
1228 | ||
1229 | /* The remaining time is being reduced unless the instance is being | |
1230 | * advertised without time limit. | |
1231 | */ | |
1232 | if (adv_instance->timeout) | |
1233 | adv_instance->remaining_time = | |
1234 | adv_instance->remaining_time - timeout; | |
1235 | ||
1236 | hdev->adv_instance_timeout = timeout; | |
1237 | queue_delayed_work(hdev->req_workqueue, | |
1238 | &hdev->adv_instance_expire, | |
1239 | msecs_to_jiffies(timeout * 1000)); | |
1240 | ||
1241 | /* If we're just re-scheduling the same instance again then do not | |
1242 | * execute any HCI commands. This happens when a single instance is | |
1243 | * being advertised. | |
1244 | */ | |
1245 | if (!force && hdev->cur_adv_instance == instance && | |
1246 | hci_dev_test_flag(hdev, HCI_LE_ADV)) | |
1247 | return 0; | |
1248 | ||
1249 | hdev->cur_adv_instance = instance; | |
1250 | __hci_req_update_adv_data(req, HCI_ADV_CURRENT); | |
1251 | __hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT); | |
1252 | __hci_req_enable_advertising(req); | |
1253 | ||
1254 | return 0; | |
1255 | } | |
1256 | ||
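The timeout computed in __hci_req_schedule_adv_instance() is the instance duration capped by its remaining lifetime, and only instances with a finite timeout have that lifetime decremented. A standalone restatement of the bookkeeping with hypothetical numbers:

```c
#include <stdio.h>

struct example_adv { unsigned int timeout, duration, remaining_time; };

static unsigned int pick_timeout(struct example_adv *adv)
{
        unsigned int timeout;

        if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
                timeout = adv->duration;
        else
                timeout = adv->remaining_time;

        if (adv->timeout)
                adv->remaining_time -= timeout;

        return timeout;
}

int main(void)
{
        struct example_adv adv = { .timeout = 10, .duration = 4, .remaining_time = 3 };
        unsigned int t = pick_timeout(&adv);

        printf("timeout %u, remaining %u\n", t, adv.remaining_time);
        /* prints: timeout 3, remaining 0 */
        return 0;
}
```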
1257 | static void cancel_adv_timeout(struct hci_dev *hdev) | |
1258 | { | |
1259 | if (hdev->adv_instance_timeout) { | |
1260 | hdev->adv_instance_timeout = 0; | |
1261 | cancel_delayed_work(&hdev->adv_instance_expire); | |
1262 | } | |
1263 | } | |
1264 | ||
1265 | /* For a single instance: | |
1266 | * - force == true: The instance will be removed even when its remaining | |
1267 | * lifetime is not zero. | |
1268 | * - force == false: the instance will be deactivated but kept stored unless | |
1269 | * the remaining lifetime is zero. | |
1270 | * | |
1271 | * For instance == 0x00: | |
1272 | * - force == true: All instances will be removed regardless of their timeout | |
1273 | * setting. | |
1274 | * - force == false: Only instances that have a timeout will be removed. | |
1275 | */ | |
1276 | void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req, | |
1277 | u8 instance, bool force) | |
1278 | { | |
1279 | struct adv_info *adv_instance, *n, *next_instance = NULL; | |
1280 | int err; | |
1281 | u8 rem_inst; | |
1282 | ||
1283 | /* Cancel any timeout concerning the removed instance(s). */ | |
1284 | if (!instance || hdev->cur_adv_instance == instance) | |
1285 | cancel_adv_timeout(hdev); | |
1286 | ||
1287 | /* Get the next instance to advertise BEFORE we remove | |
1288 | * the current one. This can be the same instance again | |
1289 | * if there is only one instance. | |
1290 | */ | |
1291 | if (instance && hdev->cur_adv_instance == instance) | |
1292 | next_instance = hci_get_next_instance(hdev, instance); | |
1293 | ||
1294 | if (instance == 0x00) { | |
1295 | list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, | |
1296 | list) { | |
1297 | if (!(force || adv_instance->timeout)) | |
1298 | continue; | |
1299 | ||
1300 | rem_inst = adv_instance->instance; | |
1301 | err = hci_remove_adv_instance(hdev, rem_inst); | |
1302 | if (!err) | |
1303 | mgmt_advertising_removed(NULL, hdev, rem_inst); | |
1304 | } | |
1305 | hdev->cur_adv_instance = 0x00; | |
1306 | } else { | |
1307 | adv_instance = hci_find_adv_instance(hdev, instance); | |
1308 | ||
1309 | if (force || (adv_instance && adv_instance->timeout && | |
1310 | !adv_instance->remaining_time)) { | |
1311 | /* Don't advertise a removed instance. */ | |
1312 | if (next_instance && | |
1313 | next_instance->instance == instance) | |
1314 | next_instance = NULL; | |
1315 | ||
1316 | err = hci_remove_adv_instance(hdev, instance); | |
1317 | if (!err) | |
1318 | mgmt_advertising_removed(NULL, hdev, instance); | |
1319 | } | |
1320 | } | |
1321 | ||
1322 | if (list_empty(&hdev->adv_instances)) { | |
1323 | hdev->cur_adv_instance = 0x00; | |
1324 | hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE); | |
1325 | } | |
1326 | ||
1327 | if (!req || !hdev_is_powered(hdev) || | |
1328 | hci_dev_test_flag(hdev, HCI_ADVERTISING)) | |
1329 | return; | |
1330 | ||
1331 | if (next_instance) | |
1332 | __hci_req_schedule_adv_instance(req, next_instance->instance, | |
1333 | false); | |
1334 | } | |
1335 | ||
0857dd3b JH |
1336 | static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) |
1337 | { | |
1338 | struct hci_dev *hdev = req->hdev; | |
1339 | ||
1340 | /* If we're advertising or initiating an LE connection we can't | |
1341 | * go ahead and change the random address at this time. This is | |
1342 | * because the eventual initiator address used for the | |
1343 | * subsequently created connection will be undefined (some | |
1344 | * controllers use the new address and others the one we had | |
1345 | * when the operation started). | |
1346 | * | |
1347 | * In this kind of scenario skip the update and let the random | |
1348 | * address be updated at the next cycle. | |
1349 | */ | |
d7a5a11d | 1350 | if (hci_dev_test_flag(hdev, HCI_LE_ADV) || |
e7d9ab73 | 1351 | hci_lookup_le_connect(hdev)) { |
0857dd3b | 1352 | BT_DBG("Deferring random address update"); |
a1536da2 | 1353 | hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); |
0857dd3b JH |
1354 | return; |
1355 | } | |
1356 | ||
1357 | hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); | |
1358 | } | |
1359 | ||
1360 | int hci_update_random_address(struct hci_request *req, bool require_privacy, | |
1361 | u8 *own_addr_type) | |
1362 | { | |
1363 | struct hci_dev *hdev = req->hdev; | |
1364 | int err; | |
1365 | ||
1366 | /* If privacy is enabled use a resolvable private address. If | |
1367 | * current RPA has expired or there is something else than | |
1368 | * the current RPA in use, then generate a new one. | |
1369 | */ | |
d7a5a11d | 1370 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { |
0857dd3b JH |
1371 | int to; |
1372 | ||
1373 | *own_addr_type = ADDR_LE_DEV_RANDOM; | |
1374 | ||
a69d8927 | 1375 | if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && |
0857dd3b JH |
1376 | !bacmp(&hdev->random_addr, &hdev->rpa)) |
1377 | return 0; | |
1378 | ||
1379 | err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); | |
1380 | if (err < 0) { | |
1381 | BT_ERR("%s failed to generate new RPA", hdev->name); | |
1382 | return err; | |
1383 | } | |
1384 | ||
1385 | set_random_addr(req, &hdev->rpa); | |
1386 | ||
1387 | to = msecs_to_jiffies(hdev->rpa_timeout * 1000); | |
1388 | queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); | |
1389 | ||
1390 | return 0; | |
1391 | } | |
1392 | ||
1393 | /* In case of required privacy without resolvable private address, | |
1394 | * use a non-resolvable private address. This is useful for active | |
1395 | * scanning and non-connectable advertising. | |
1396 | */ | |
1397 | if (require_privacy) { | |
1398 | bdaddr_t nrpa; | |
1399 | ||
1400 | while (true) { | |
1401 | /* The non-resolvable private address is generated | |
1402 | * from six random bytes with the two most significant | |
1403 | * bits cleared. | |
1404 | */ | |
1405 | get_random_bytes(&nrpa, 6); | |
1406 | nrpa.b[5] &= 0x3f; | |
1407 | ||
1408 | /* The non-resolvable private address shall not be | |
1409 | * equal to the public address. | |
1410 | */ | |
1411 | if (bacmp(&hdev->bdaddr, &nrpa)) | |
1412 | break; | |
1413 | } | |
1414 | ||
1415 | *own_addr_type = ADDR_LE_DEV_RANDOM; | |
1416 | set_random_addr(req, &nrpa); | |
1417 | return 0; | |
1418 | } | |
1419 | ||
1420 | /* If forcing static address is in use or there is no public | |
1421 | * address use the static address as random address (but skip | |
1422 | * the HCI command if the current random address is already the | |
1423 | * static one). |
50b5b952 MH |
1424 | * |
1425 | * In case BR/EDR has been disabled on a dual-mode controller | |
1426 | * and a static address has been configured, then use that | |
1427 | * address instead of the public BR/EDR address. | |
0857dd3b | 1428 | */ |
b7cb93e5 | 1429 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || |
50b5b952 | 1430 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || |
d7a5a11d | 1431 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && |
50b5b952 | 1432 | bacmp(&hdev->static_addr, BDADDR_ANY))) { |
0857dd3b JH |
1433 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
1434 | if (bacmp(&hdev->static_addr, &hdev->random_addr)) | |
1435 | hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, | |
1436 | &hdev->static_addr); | |
1437 | return 0; | |
1438 | } | |
1439 | ||
1440 | /* Neither privacy nor static address is being used so use a | |
1441 | * public address. | |
1442 | */ | |
1443 | *own_addr_type = ADDR_LE_DEV_PUBLIC; | |
1444 | ||
1445 | return 0; | |
1446 | } | |
2cf22218 | 1447 | |
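A non-resolvable private address is simply 48 random bits with the two most significant bits forced to zero; since bdaddr_t stores the address little-endian, that is the & 0x3f applied to b[5] above. A user-space sketch of the same masking, using rand() as a stand-in for get_random_bytes():

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned char nrpa[6];
        int i;

        /* stand-in for get_random_bytes(); not cryptographically strong */
        for (i = 0; i < 6; i++)
                nrpa[i] = rand() & 0xff;

        /* clear the two most significant bits -> non-resolvable private */
        nrpa[5] &= 0x3f;

        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               nrpa[5], nrpa[4], nrpa[3], nrpa[2], nrpa[1], nrpa[0]);
        return 0;
}
```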
405a2611 JH |
1448 | static bool disconnected_whitelist_entries(struct hci_dev *hdev) |
1449 | { | |
1450 | struct bdaddr_list *b; | |
1451 | ||
1452 | list_for_each_entry(b, &hdev->whitelist, list) { | |
1453 | struct hci_conn *conn; | |
1454 | ||
1455 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); | |
1456 | if (!conn) | |
1457 | return true; | |
1458 | ||
1459 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) | |
1460 | return true; | |
1461 | } | |
1462 | ||
1463 | return false; | |
1464 | } | |
1465 | ||
01b1cb87 | 1466 | void __hci_req_update_scan(struct hci_request *req) |
405a2611 JH |
1467 | { |
1468 | struct hci_dev *hdev = req->hdev; | |
1469 | u8 scan; | |
1470 | ||
d7a5a11d | 1471 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
405a2611 JH |
1472 | return; |
1473 | ||
1474 | if (!hdev_is_powered(hdev)) | |
1475 | return; | |
1476 | ||
1477 | if (mgmt_powering_down(hdev)) | |
1478 | return; | |
1479 | ||
d7a5a11d | 1480 | if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || |
405a2611 JH |
1481 | disconnected_whitelist_entries(hdev)) |
1482 | scan = SCAN_PAGE; | |
1483 | else | |
1484 | scan = SCAN_DISABLED; | |
1485 | ||
d7a5a11d | 1486 | if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) |
405a2611 JH |
1487 | scan |= SCAN_INQUIRY; |
1488 | ||
01b1cb87 JH |
1489 | if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && |
1490 | test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) | |
1491 | return; | |
1492 | ||
405a2611 JH |
1493 | hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
1494 | } | |
1495 | ||
01b1cb87 | 1496 | static int update_scan(struct hci_request *req, unsigned long opt) |
405a2611 | 1497 | { |
01b1cb87 JH |
1498 | hci_dev_lock(req->hdev); |
1499 | __hci_req_update_scan(req); | |
1500 | hci_dev_unlock(req->hdev); | |
1501 | return 0; | |
1502 | } | |
405a2611 | 1503 | |
01b1cb87 JH |
1504 | static void scan_update_work(struct work_struct *work) |
1505 | { | |
1506 | struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); | |
1507 | ||
1508 | hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); | |
405a2611 JH |
1509 | } |
1510 | ||
53c0ba74 JH |
1511 | static int connectable_update(struct hci_request *req, unsigned long opt) |
1512 | { | |
1513 | struct hci_dev *hdev = req->hdev; | |
1514 | ||
1515 | hci_dev_lock(hdev); | |
1516 | ||
1517 | __hci_req_update_scan(req); | |
1518 | ||
1519 | /* If BR/EDR is not enabled and we disable advertising as a | |
1520 | * by-product of disabling connectable, we need to update the | |
1521 | * advertising flags. | |
1522 | */ | |
1523 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | |
1524 | __hci_req_update_adv_data(req, HCI_ADV_CURRENT); | |
1525 | ||
1526 | /* Update the advertising parameters if necessary */ | |
1527 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | |
1528 | hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) | |
1529 | __hci_req_enable_advertising(req); | |
1530 | ||
1531 | __hci_update_background_scan(req); | |
1532 | ||
1533 | hci_dev_unlock(hdev); | |
1534 | ||
1535 | return 0; | |
1536 | } | |
1537 | ||
1538 | static void connectable_update_work(struct work_struct *work) | |
1539 | { | |
1540 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
1541 | connectable_update); | |
1542 | u8 status; | |
1543 | ||
1544 | hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); | |
1545 | mgmt_set_connectable_complete(hdev, status); | |
1546 | } | |
1547 | ||
14bf5eac JH |
1548 | static u8 get_service_classes(struct hci_dev *hdev) |
1549 | { | |
1550 | struct bt_uuid *uuid; | |
1551 | u8 val = 0; | |
1552 | ||
1553 | list_for_each_entry(uuid, &hdev->uuids, list) | |
1554 | val |= uuid->svc_hint; | |
1555 | ||
1556 | return val; | |
1557 | } | |
1558 | ||
1559 | void __hci_req_update_class(struct hci_request *req) | |
1560 | { | |
1561 | struct hci_dev *hdev = req->hdev; | |
1562 | u8 cod[3]; | |
1563 | ||
1564 | BT_DBG("%s", hdev->name); | |
1565 | ||
1566 | if (!hdev_is_powered(hdev)) | |
1567 | return; | |
1568 | ||
1569 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | |
1570 | return; | |
1571 | ||
1572 | if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) | |
1573 | return; | |
1574 | ||
1575 | cod[0] = hdev->minor_class; | |
1576 | cod[1] = hdev->major_class; | |
1577 | cod[2] = get_service_classes(hdev); | |
1578 | ||
1579 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) | |
1580 | cod[1] |= 0x20; | |
1581 | ||
1582 | if (memcmp(cod, hdev->dev_class, 3) == 0) | |
1583 | return; | |
1584 | ||
1585 | hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); | |
1586 | } | |
1587 | ||
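The three cod[] bytes form the 24-bit Class of Device: minor class, major class and service classes, with the 0x20 OR-ed into the middle byte setting the Limited Discoverable Mode service bit (bit 13 of the full value). A standalone illustration with hypothetical class values:

```c
#include <stdio.h>

int main(void)
{
        /* hypothetical values: minor 0x0c, major 0x01 (computer) */
        unsigned char cod[3] = { 0x0c, 0x01, 0x00 };

        /* Limited Discoverable Mode lives in bit 13 of the 24-bit CoD,
         * i.e. bit 5 of the middle byte - the 0x20 used above.
         */
        cod[1] |= 0x20;

        printf("Class of Device: 0x%02x%02x%02x\n", cod[2], cod[1], cod[0]);
        /* prints: Class of Device: 0x00210c */
        return 0;
}
```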
aed1a885 JH |
1588 | static void write_iac(struct hci_request *req) |
1589 | { | |
1590 | struct hci_dev *hdev = req->hdev; | |
1591 | struct hci_cp_write_current_iac_lap cp; | |
1592 | ||
1593 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) | |
1594 | return; | |
1595 | ||
1596 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { | |
1597 | /* Limited discoverable mode */ | |
1598 | cp.num_iac = min_t(u8, hdev->num_iac, 2); | |
1599 | cp.iac_lap[0] = 0x00; /* LIAC */ | |
1600 | cp.iac_lap[1] = 0x8b; | |
1601 | cp.iac_lap[2] = 0x9e; | |
1602 | cp.iac_lap[3] = 0x33; /* GIAC */ | |
1603 | cp.iac_lap[4] = 0x8b; | |
1604 | cp.iac_lap[5] = 0x9e; | |
1605 | } else { | |
1606 | /* General discoverable mode */ | |
1607 | cp.num_iac = 1; | |
1608 | cp.iac_lap[0] = 0x33; /* GIAC */ | |
1609 | cp.iac_lap[1] = 0x8b; | |
1610 | cp.iac_lap[2] = 0x9e; | |
1611 | } | |
1612 | ||
1613 | hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, | |
1614 | (cp.num_iac * 3) + 1, &cp); | |
1615 | } | |
1616 | ||
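Each IAC entry written above is a 3-byte little-endian LAP: 0x9e8b33 is the General Inquiry Access Code and 0x9e8b00 the Limited one, which is why the limited-discoverable branch queues the LIAC first and the GIAC second. Reassembling the LAPs from the byte order used in write_iac():

```c
#include <stdio.h>

int main(void)
{
        unsigned char liac[3] = { 0x00, 0x8b, 0x9e };   /* as queued above */
        unsigned char giac[3] = { 0x33, 0x8b, 0x9e };

        printf("LIAC 0x%06x, GIAC 0x%06x\n",
               liac[0] | liac[1] << 8 | liac[2] << 16,
               giac[0] | giac[1] << 8 | giac[2] << 16);
        /* prints: LIAC 0x9e8b00, GIAC 0x9e8b33 */
        return 0;
}
```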
1617 | static int discoverable_update(struct hci_request *req, unsigned long opt) | |
1618 | { | |
1619 | struct hci_dev *hdev = req->hdev; | |
1620 | ||
1621 | hci_dev_lock(hdev); | |
1622 | ||
1623 | if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { | |
1624 | write_iac(req); | |
1625 | __hci_req_update_scan(req); | |
1626 | __hci_req_update_class(req); | |
1627 | } | |
1628 | ||
1629 | /* Advertising instances don't use the global discoverable setting, so | |
1630 | * only update AD if advertising was enabled using Set Advertising. | |
1631 | */ | |
1632 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) | |
1633 | __hci_req_update_adv_data(req, HCI_ADV_CURRENT); | |
1634 | ||
1635 | hci_dev_unlock(hdev); | |
1636 | ||
1637 | return 0; | |
1638 | } | |
1639 | ||
1640 | static void discoverable_update_work(struct work_struct *work) | |
1641 | { | |
1642 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
1643 | discoverable_update); | |
1644 | u8 status; | |
1645 | ||
1646 | hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); | |
1647 | mgmt_set_discoverable_complete(hdev, status); | |
1648 | } | |
1649 | ||
dcc0f0d9 JH |
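/* Queue the HCI commands needed to tear down @conn, chosen by connection
 * state: Disconnect (or Disconnect Physical Link for AMP) while connected,
 * Create Connection Cancel or LE Create Connection Cancel while an outgoing
 * connection is being set up, and a reject command for incoming connections
 * still in BT_CONNECT2. The caller is responsible for running the request.
 */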
1650 | void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, |
1651 | u8 reason) | |
1652 | { | |
1653 | switch (conn->state) { | |
1654 | case BT_CONNECTED: | |
1655 | case BT_CONFIG: | |
1656 | if (conn->type == AMP_LINK) { | |
1657 | struct hci_cp_disconn_phy_link cp; | |
1658 | ||
1659 | cp.phy_handle = HCI_PHY_HANDLE(conn->handle); | |
1660 | cp.reason = reason; | |
1661 | hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), | |
1662 | &cp); | |
1663 | } else { | |
1664 | struct hci_cp_disconnect dc; | |
1665 | ||
1666 | dc.handle = cpu_to_le16(conn->handle); | |
1667 | dc.reason = reason; | |
1668 | hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); | |
1669 | } | |
1670 | ||
1671 | conn->state = BT_DISCONN; | |
1672 | ||
1673 | break; | |
1674 | case BT_CONNECT: | |
1675 | if (conn->type == LE_LINK) { | |
1676 | if (test_bit(HCI_CONN_SCANNING, &conn->flags)) | |
1677 | break; | |
1678 | hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, | |
1679 | 0, NULL); | |
1680 | } else if (conn->type == ACL_LINK) { | |
1681 | if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) | |
1682 | break; | |
1683 | hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, | |
1684 | 6, &conn->dst); | |
1685 | } | |
1686 | break; | |
1687 | case BT_CONNECT2: | |
1688 | if (conn->type == ACL_LINK) { | |
1689 | struct hci_cp_reject_conn_req rej; | |
1690 | ||
1691 | bacpy(&rej.bdaddr, &conn->dst); | |
1692 | rej.reason = reason; | |
1693 | ||
1694 | hci_req_add(req, HCI_OP_REJECT_CONN_REQ, | |
1695 | sizeof(rej), &rej); | |
1696 | } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { | |
1697 | struct hci_cp_reject_sync_conn_req rej; | |
1698 | ||
1699 | bacpy(&rej.bdaddr, &conn->dst); | |
1700 | ||
1701 | /* SCO rejection has its own limited set of | |
1702 | * allowed error values (0x0D-0x0F) which isn't | |
1703 | * compatible with most values passed to this | |
1704 | * function. To be safe, hard-code one of the | |
1705 | * values that's suitable for SCO. | |
1706 | */ | |
1707 | rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES; | |
1708 | ||
1709 | hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, | |
1710 | sizeof(rej), &rej); | |
1711 | } | |
1712 | break; | |
1713 | default: | |
1714 | conn->state = BT_CLOSED; | |
1715 | break; | |
1716 | } | |
1717 | } | |
1718 | ||
1719 | static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) | |
1720 | { | |
1721 | if (status) | |
1722 | BT_DBG("Failed to abort connection: status 0x%2.2x", status); | |
1723 | } | |
1724 | ||
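/* Convenience wrapper that builds and runs an abort request for @conn on
 * its own. -ENODATA from hci_req_run() only means that no command was
 * queued (for example an LE connection that is still just being scanned
 * for), so it is not treated as a failure.
 */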
1725 | int hci_abort_conn(struct hci_conn *conn, u8 reason) | |
1726 | { | |
1727 | struct hci_request req; | |
1728 | int err; | |
1729 | ||
1730 | hci_req_init(&req, conn->hdev); | |
1731 | ||
1732 | __hci_abort_conn(&req, conn, reason); | |
1733 | ||
1734 | err = hci_req_run(&req, abort_conn_complete); | |
1735 | if (err && err != -ENODATA) { | |
1736 | BT_ERR("Failed to run HCI request: err %d", err); | |
1737 | return err; | |
1738 | } | |
1739 | ||
1740 | return 0; | |
1741 | } | |
5fc16cc4 | 1742 | |
a1d01db1 | 1743 | static int update_bg_scan(struct hci_request *req, unsigned long opt) |
2e93e53b JH |
1744 | { |
1745 | hci_dev_lock(req->hdev); | |
1746 | __hci_update_background_scan(req); | |
1747 | hci_dev_unlock(req->hdev); | |
a1d01db1 | 1748 | return 0; |
2e93e53b JH |
1749 | } |
1750 | ||
1751 | static void bg_scan_update(struct work_struct *work) | |
1752 | { | |
1753 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
1754 | bg_scan_update); | |
84235d22 JH |
1755 | struct hci_conn *conn; |
1756 | u8 status; | |
1757 | int err; | |
1758 | ||
1759 | err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); | |
1760 | if (!err) | |
1761 | return; | |
1762 | ||
1763 | hci_dev_lock(hdev); | |
1764 | ||
1765 | conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); | |
1766 | if (conn) | |
1767 | hci_le_conn_failed(conn, status); | |
2e93e53b | 1768 | |
84235d22 | 1769 | hci_dev_unlock(hdev); |
2e93e53b JH |
1770 | } |
1771 | ||
f4a2cb4d | 1772 | static int le_scan_disable(struct hci_request *req, unsigned long opt) |
7c1fbed2 | 1773 | { |
f4a2cb4d JH |
1774 | hci_req_add_le_scan_disable(req); |
1775 | return 0; | |
7c1fbed2 JH |
1776 | } |
1777 | ||
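/* Flush the inquiry cache and queue an HCI Inquiry using the General
 * Inquiry Access Code. @opt carries the inquiry length, i.e. the value
 * put into the Inquiry_Length field (1.28 s units).
 */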
f4a2cb4d | 1778 | static int bredr_inquiry(struct hci_request *req, unsigned long opt) |
7c1fbed2 | 1779 | { |
f4a2cb4d | 1780 | u8 length = opt; |
7c1fbed2 JH |
1781 | /* General inquiry access code (GIAC) */ |
1782 | u8 lap[3] = { 0x33, 0x8b, 0x9e }; | |
1783 | struct hci_cp_inquiry cp; | |
7c1fbed2 | 1784 | |
f4a2cb4d | 1785 | BT_DBG("%s", req->hdev->name); |
7c1fbed2 | 1786 | |
f4a2cb4d JH |
1787 | hci_dev_lock(req->hdev); |
1788 | hci_inquiry_cache_flush(req->hdev); | |
1789 | hci_dev_unlock(req->hdev); | |
7c1fbed2 | 1790 | |
f4a2cb4d JH |
1791 | memset(&cp, 0, sizeof(cp)); |
1792 | memcpy(&cp.lap, lap, sizeof(cp.lap)); | |
1793 | cp.length = length; | |
7c1fbed2 | 1794 | |
f4a2cb4d | 1795 | hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); |
7c1fbed2 | 1796 | |
a1d01db1 | 1797 | return 0; |
7c1fbed2 JH |
1798 | } |
1799 | ||
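/* Worker behind hdev->le_scan_disable: stop LE scanning once the discovery
 * timeout fires. LE-only discovery is then moved to DISCOVERY_STOPPED;
 * interleaved discovery either waits for the still-running BR/EDR inquiry
 * (simultaneous discovery quirk) or starts the BR/EDR inquiry phase here.
 */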
1800 | static void le_scan_disable_work(struct work_struct *work) | |
1801 | { | |
1802 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
1803 | le_scan_disable.work); | |
1804 | u8 status; | |
7c1fbed2 JH |
1805 | |
1806 | BT_DBG("%s", hdev->name); | |
1807 | ||
f4a2cb4d JH |
1808 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) |
1809 | return; | |
1810 | ||
7c1fbed2 JH |
1811 | cancel_delayed_work(&hdev->le_scan_restart); |
1812 | ||
f4a2cb4d JH |
1813 | hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); |
1814 | if (status) { | |
1815 | BT_ERR("Failed to disable LE scan: status 0x%02x", status); | |
1816 | return; | |
1817 | } | |
1818 | ||
1819 | hdev->discovery.scan_start = 0; | |
1820 | ||
1821 | /* If we were running an LE-only scan, change the discovery state. | |
1822 | * If we were running both LE and BR/EDR inquiry simultaneously | |
1823 | * and the BR/EDR inquiry has already finished, stop discovery; | |
1824 | * otherwise the BR/EDR inquiry will stop discovery when it finishes. | |
1825 | * If a remote device name is still being resolved, do not change | |
1826 | * the discovery state. | |
1827 | */ | |
1828 | ||
1829 | if (hdev->discovery.type == DISCOV_TYPE_LE) | |
1830 | goto discov_stopped; | |
1831 | ||
1832 | if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) | |
7c1fbed2 JH |
1833 | return; |
1834 | ||
f4a2cb4d JH |
1835 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { |
1836 | if (!test_bit(HCI_INQUIRY, &hdev->flags) && | |
1837 | hdev->discovery.state != DISCOVERY_RESOLVING) | |
1838 | goto discov_stopped; | |
1839 | ||
1840 | return; | |
1841 | } | |
1842 | ||
1843 | hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, | |
1844 | HCI_CMD_TIMEOUT, &status); | |
1845 | if (status) { | |
1846 | BT_ERR("Inquiry failed: status 0x%02x", status); | |
1847 | goto discov_stopped; | |
1848 | } | |
1849 | ||
1850 | return; | |
1851 | ||
1852 | discov_stopped: | |
1853 | hci_dev_lock(hdev); | |
1854 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | |
1855 | hci_dev_unlock(hdev); | |
7c1fbed2 JH |
1856 | } |
1857 | ||
3dfe5905 JH |
1858 | static int le_scan_restart(struct hci_request *req, unsigned long opt) |
1859 | { | |
1860 | struct hci_dev *hdev = req->hdev; | |
1861 | struct hci_cp_le_set_scan_enable cp; | |
1862 | ||
1863 | /* If the controller is not scanning, we are done. */ | |
1864 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | |
1865 | return 0; | |
1866 | ||
1867 | hci_req_add_le_scan_disable(req); | |
1868 | ||
1869 | memset(&cp, 0, sizeof(cp)); | |
1870 | cp.enable = LE_SCAN_ENABLE; | |
1871 | cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | |
1872 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | |
1873 | ||
1874 | return 0; | |
1875 | } | |
1876 | ||
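/* Worker behind hdev->le_scan_restart: restart LE scanning and, when the
 * controller uses a strict duplicate filter and a scan start time was
 * recorded, re-arm the le_scan_disable work with whatever is left of the
 * original scan duration so that discovery still stops on time.
 */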
1877 | static void le_scan_restart_work(struct work_struct *work) | |
7c1fbed2 | 1878 | { |
3dfe5905 JH |
1879 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
1880 | le_scan_restart.work); | |
7c1fbed2 | 1881 | unsigned long timeout, duration, scan_start, now; |
3dfe5905 | 1882 | u8 status; |
7c1fbed2 JH |
1883 | |
1884 | BT_DBG("%s", hdev->name); | |
1885 | ||
3dfe5905 | 1886 | hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); |
7c1fbed2 JH |
1887 | if (status) { |
1888 | BT_ERR("Failed to restart LE scan: status %d", status); | |
1889 | return; | |
1890 | } | |
1891 | ||
1892 | hci_dev_lock(hdev); | |
1893 | ||
1894 | if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || | |
1895 | !hdev->discovery.scan_start) | |
1896 | goto unlock; | |
1897 | ||
1898 | /* When the scan was started, hdev->le_scan_disable was queued to run | |
1899 | * 'duration' jiffies after scan_start. That work was canceled during | |
1900 | * the scan restart, so queue it again with the remaining part of the | |
1901 | * timeout to make sure the scan does not run indefinitely. | |
1902 | */ | |
1903 | duration = hdev->discovery.scan_duration; | |
1904 | scan_start = hdev->discovery.scan_start; | |
1905 | now = jiffies; | |
1906 | if (now - scan_start <= duration) { | |
1907 | int elapsed; | |
1908 | ||
1909 | if (now >= scan_start) | |
1910 | elapsed = now - scan_start; | |
1911 | else | |
1912 | elapsed = ULONG_MAX - scan_start + now; | |
1913 | ||
1914 | timeout = duration - elapsed; | |
1915 | } else { | |
1916 | timeout = 0; | |
1917 | } | |
1918 | ||
1919 | queue_delayed_work(hdev->req_workqueue, | |
1920 | &hdev->le_scan_disable, timeout); | |
1921 | ||
1922 | unlock: | |
1923 | hci_dev_unlock(hdev); | |
1924 | } | |
1925 | ||
e68f072b JH |
1926 | static void disable_advertising(struct hci_request *req) |
1927 | { | |
1928 | u8 enable = 0x00; | |
1929 | ||
1930 | hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); | |
1931 | } | |
1932 | ||
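/* Build an active LE scan for discovery: stop advertising (unless directed
 * advertising is being used for an outgoing connection, in which case
 * -EBUSY is returned) and any running passive background scan, pick a
 * resolvable or non-resolvable private own address, then set the scan
 * parameters with the interval from @opt and enable duplicate filtering.
 */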
1933 | static int active_scan(struct hci_request *req, unsigned long opt) | |
1934 | { | |
1935 | uint16_t interval = opt; | |
1936 | struct hci_dev *hdev = req->hdev; | |
1937 | struct hci_cp_le_set_scan_param param_cp; | |
1938 | struct hci_cp_le_set_scan_enable enable_cp; | |
1939 | u8 own_addr_type; | |
1940 | int err; | |
1941 | ||
1942 | BT_DBG("%s", hdev->name); | |
1943 | ||
1944 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { | |
1945 | hci_dev_lock(hdev); | |
1946 | ||
1947 | /* Don't let discovery abort an outgoing connection attempt | |
1948 | * that's using directed advertising. | |
1949 | */ | |
1950 | if (hci_lookup_le_connect(hdev)) { | |
1951 | hci_dev_unlock(hdev); | |
1952 | return -EBUSY; | |
1953 | } | |
1954 | ||
1955 | cancel_adv_timeout(hdev); | |
1956 | hci_dev_unlock(hdev); | |
1957 | ||
1958 | disable_advertising(req); | |
1959 | } | |
1960 | ||
1961 | /* If the controller is scanning, background scanning is currently | |
1962 | * running. Temporarily stop it so that the discovery scanning | |
1963 | * parameters can be set. | |
1964 | */ | |
1965 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) | |
1966 | hci_req_add_le_scan_disable(req); | |
1967 | ||
1968 | /* All active scans will be done with either a resolvable private | |
1969 | * address (when the privacy feature has been enabled) or a | |
1970 | * non-resolvable private address. | |
1971 | */ | |
1972 | err = hci_update_random_address(req, true, &own_addr_type); | |
1973 | if (err < 0) | |
1974 | own_addr_type = ADDR_LE_DEV_PUBLIC; | |
1975 | ||
1976 | memset(¶m_cp, 0, sizeof(param_cp)); | |
1977 | param_cp.type = LE_SCAN_ACTIVE; | |
1978 | param_cp.interval = cpu_to_le16(interval); | |
1979 | param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN); | |
1980 | param_cp.own_address_type = own_addr_type; | |
1981 | ||
1982 | hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), | |
1983 | ¶m_cp); | |
1984 | ||
1985 | memset(&enable_cp, 0, sizeof(enable_cp)); | |
1986 | enable_cp.enable = LE_SCAN_ENABLE; | |
1987 | enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | |
1988 | ||
1989 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), | |
1990 | &enable_cp); | |
1991 | ||
1992 | return 0; | |
1993 | } | |
1994 | ||
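/* Interleaved discovery for controllers with the simultaneous discovery
 * quirk: queue the active LE scan and the BR/EDR inquiry in one request
 * so the controller schedules both itself.
 */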
1995 | static int interleaved_discov(struct hci_request *req, unsigned long opt) | |
1996 | { | |
1997 | int err; | |
1998 | ||
1999 | BT_DBG("%s", req->hdev->name); | |
2000 | ||
2001 | err = active_scan(req, opt); | |
2002 | if (err) | |
2003 | return err; | |
2004 | ||
7df26b56 | 2005 | return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); |
e68f072b JH |
2006 | } |
2007 | ||
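/* Start discovery according to hdev->discovery.type and report the result
 * through @status. For the LE based types the le_scan_disable work is
 * queued as well, so that scanning is stopped again once the discovery
 * timeout expires.
 */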
2008 | static void start_discovery(struct hci_dev *hdev, u8 *status) | |
2009 | { | |
2010 | unsigned long timeout; | |
2011 | ||
2012 | BT_DBG("%s type %u", hdev->name, hdev->discovery.type); | |
2013 | ||
2014 | switch (hdev->discovery.type) { | |
2015 | case DISCOV_TYPE_BREDR: | |
2016 | if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) | |
7df26b56 JH |
2017 | hci_req_sync(hdev, bredr_inquiry, |
2018 | DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, | |
e68f072b JH |
2019 | status); |
2020 | return; | |
2021 | case DISCOV_TYPE_INTERLEAVED: | |
2022 | /* When running simultaneous discovery, the LE scanning time | |
2023 | * should occupy the whole discovery time since BR/EDR inquiry | |
2024 | * and LE scanning are scheduled by the controller. | |
2025 | * | |
2026 | * For interleaving discovery in comparison, BR/EDR inquiry | |
2027 | * and LE scanning are done sequentially with separate | |
2028 | * timeouts. | |
2029 | */ | |
2030 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, | |
2031 | &hdev->quirks)) { | |
2032 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | |
2033 | /* During simultaneous discovery, we double the LE scan | |
2034 | * interval. We must leave some time for the controller | |
2035 | * to do BR/EDR inquiry. | |
2036 | */ | |
2037 | hci_req_sync(hdev, interleaved_discov, | |
2038 | DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT, | |
2039 | status); | |
2040 | break; | |
2041 | } | |
2042 | ||
2043 | timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); | |
2044 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | |
2045 | HCI_CMD_TIMEOUT, status); | |
2046 | break; | |
2047 | case DISCOV_TYPE_LE: | |
2048 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | |
2049 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | |
2050 | HCI_CMD_TIMEOUT, status); | |
2051 | break; | |
2052 | default: | |
2053 | *status = HCI_ERROR_UNSPECIFIED; | |
2054 | return; | |
2055 | } | |
2056 | ||
2057 | if (*status) | |
2058 | return; | |
2059 | ||
2060 | BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout)); | |
2061 | ||
2062 | /* When service discovery is used and the controller has a | |
2063 | * strict duplicate filter, it is important to remember the | |
2064 | * start and duration of the scan. This is required for | |
2065 | * restarting scanning during the discovery phase. | |
2066 | */ | |
2067 | if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && | |
2068 | hdev->discovery.result_filtering) { | |
2069 | hdev->discovery.scan_start = jiffies; | |
2070 | hdev->discovery.scan_duration = timeout; | |
2071 | } | |
2072 | ||
2073 | queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, | |
2074 | timeout); | |
2075 | } | |
2076 | ||
2154d3f4 JH |
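/* Queue the commands needed to stop the current discovery: cancel a
 * running inquiry, disable LE scanning and, unless this is LE-only
 * discovery, cancel a pending remote name request. Returns true if at
 * least one command was queued.
 */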
2077 | bool hci_req_stop_discovery(struct hci_request *req) |
2078 | { | |
2079 | struct hci_dev *hdev = req->hdev; | |
2080 | struct discovery_state *d = &hdev->discovery; | |
2081 | struct hci_cp_remote_name_req_cancel cp; | |
2082 | struct inquiry_entry *e; | |
2083 | bool ret = false; | |
2084 | ||
2085 | BT_DBG("%s state %u", hdev->name, hdev->discovery.state); | |
2086 | ||
2087 | if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { | |
2088 | if (test_bit(HCI_INQUIRY, &hdev->flags)) | |
2089 | hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); | |
2090 | ||
2091 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | |
2092 | cancel_delayed_work(&hdev->le_scan_disable); | |
2093 | hci_req_add_le_scan_disable(req); | |
2094 | } | |
2095 | ||
2096 | ret = true; | |
2097 | } else { | |
2098 | /* Passive scanning */ | |
2099 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | |
2100 | hci_req_add_le_scan_disable(req); | |
2101 | ret = true; | |
2102 | } | |
2103 | } | |
2104 | ||
2105 | /* No further actions needed for LE-only discovery */ | |
2106 | if (d->type == DISCOV_TYPE_LE) | |
2107 | return ret; | |
2108 | ||
2109 | if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { | |
2110 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, | |
2111 | NAME_PENDING); | |
2112 | if (!e) | |
2113 | return ret; | |
2114 | ||
2115 | bacpy(&cp.bdaddr, &e->data.bdaddr); | |
2116 | hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), | |
2117 | &cp); | |
2118 | ret = true; | |
2119 | } | |
2120 | ||
2121 | return ret; | |
2122 | } | |
2123 | ||
2124 | static int stop_discovery(struct hci_request *req, unsigned long opt) | |
2125 | { | |
2126 | hci_dev_lock(req->hdev); | |
2127 | hci_req_stop_discovery(req); | |
2128 | hci_dev_unlock(req->hdev); | |
2129 | ||
2130 | return 0; | |
2131 | } | |
2132 | ||
e68f072b JH |
2133 | static void discov_update(struct work_struct *work) |
2134 | { | |
2135 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
2136 | discov_update); | |
2137 | u8 status = 0; | |
2138 | ||
2139 | switch (hdev->discovery.state) { | |
2140 | case DISCOVERY_STARTING: | |
2141 | start_discovery(hdev, &status); | |
2142 | mgmt_start_discovery_complete(hdev, status); | |
2143 | if (status) | |
2144 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | |
2145 | else | |
2146 | hci_discovery_set_state(hdev, DISCOVERY_FINDING); | |
2147 | break; | |
2154d3f4 JH |
2148 | case DISCOVERY_STOPPING: |
2149 | hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); | |
2150 | mgmt_stop_discovery_complete(hdev, status); | |
2151 | if (!status) | |
2152 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | |
2153 | break; | |
e68f072b JH |
2154 | case DISCOVERY_STOPPED: |
2155 | default: | |
2156 | return; | |
2157 | } | |
2158 | } | |
2159 | ||
c366f555 JH |
2160 | static void discov_off(struct work_struct *work) |
2161 | { | |
2162 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
2163 | discov_off.work); | |
2164 | ||
2165 | BT_DBG("%s", hdev->name); | |
2166 | ||
2167 | hci_dev_lock(hdev); | |
2168 | ||
2169 | /* When the discoverable timeout triggers, just make sure | |
2170 | * the limited discoverable flag is cleared. Even when the | |
2171 | * timeout was triggered from general discoverable mode, it is | |
2172 | * safe to unconditionally clear the flag. | |
2173 | */ | |
2174 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); | |
2175 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); | |
2176 | hdev->discov_timeout = 0; | |
2177 | ||
2178 | hci_dev_unlock(hdev); | |
2179 | ||
2180 | hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); | |
2181 | mgmt_new_settings(hdev); | |
2182 | } | |
2183 | ||
2ff13894 JH |
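/* Synchronize the controller with the stack's settings right after power
 * on: SSP and Secure Connections support, LE host support, advertising
 * and scan response data, authentication, fast connectable mode, scan
 * mode, class of device, local name and EIR data.
 */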
2184 | static int powered_update_hci(struct hci_request *req, unsigned long opt) |
2185 | { | |
2186 | struct hci_dev *hdev = req->hdev; | |
2187 | struct adv_info *adv_instance; | |
2188 | u8 link_sec; | |
2189 | ||
2190 | hci_dev_lock(hdev); | |
2191 | ||
2192 | if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && | |
2193 | !lmp_host_ssp_capable(hdev)) { | |
2194 | u8 mode = 0x01; | |
2195 | ||
2196 | hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); | |
2197 | ||
2198 | if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { | |
2199 | u8 support = 0x01; | |
2200 | ||
2201 | hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, | |
2202 | sizeof(support), &support); | |
2203 | } | |
2204 | } | |
2205 | ||
2206 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && | |
2207 | lmp_bredr_capable(hdev)) { | |
2208 | struct hci_cp_write_le_host_supported cp; | |
2209 | ||
2210 | cp.le = 0x01; | |
2211 | cp.simul = 0x00; | |
2212 | ||
2213 | /* Check first if we already have the right | |
2214 | * host state (host features set) | |
2215 | */ | |
2216 | if (cp.le != lmp_host_le_capable(hdev) || | |
2217 | cp.simul != lmp_host_le_br_capable(hdev)) | |
2218 | hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, | |
2219 | sizeof(cp), &cp); | |
2220 | } | |
2221 | ||
2222 | if (lmp_le_capable(hdev)) { | |
2223 | /* Make sure the controller has a good default for | |
2224 | * advertising data. This also applies to the case | |
2225 | * where BR/EDR was toggled during the AUTO_OFF phase. | |
2226 | */ | |
2227 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && | |
2228 | (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | |
2229 | !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))) { | |
2230 | __hci_req_update_adv_data(req, HCI_ADV_CURRENT); | |
2231 | __hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT); | |
2232 | } | |
2233 | ||
2234 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) && | |
2235 | hdev->cur_adv_instance == 0x00 && | |
2236 | !list_empty(&hdev->adv_instances)) { | |
2237 | adv_instance = list_first_entry(&hdev->adv_instances, | |
2238 | struct adv_info, list); | |
2239 | hdev->cur_adv_instance = adv_instance->instance; | |
2240 | } | |
2241 | ||
2242 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) | |
2243 | __hci_req_enable_advertising(req); | |
2244 | else if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) && | |
2245 | hdev->cur_adv_instance) | |
2246 | __hci_req_schedule_adv_instance(req, | |
2247 | hdev->cur_adv_instance, | |
2248 | true); | |
2249 | } | |
2250 | ||
2251 | link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); | |
2252 | if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) | |
2253 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, | |
2254 | sizeof(link_sec), &link_sec); | |
2255 | ||
2256 | if (lmp_bredr_capable(hdev)) { | |
2257 | if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) | |
2258 | __hci_req_write_fast_connectable(req, true); | |
2259 | else | |
2260 | __hci_req_write_fast_connectable(req, false); | |
2261 | __hci_req_update_scan(req); | |
2262 | __hci_req_update_class(req); | |
2263 | __hci_req_update_name(req); | |
2264 | __hci_req_update_eir(req); | |
2265 | } | |
2266 | ||
2267 | hci_dev_unlock(hdev); | |
2268 | return 0; | |
2269 | } | |
2270 | ||
2271 | int __hci_req_hci_power_on(struct hci_dev *hdev) | |
2272 | { | |
2273 | /* Register the available SMP channels (BR/EDR and LE) only when | |
2274 | * successfully powering on the controller. This late | |
2275 | * registration is required so that LE SMP can clearly decide if | |
2276 | * the public address or static address is used. | |
2277 | */ | |
2278 | smp_register(hdev); | |
2279 | ||
2280 | return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, | |
2281 | NULL); | |
2282 | } | |
2283 | ||
5fc16cc4 JH |
2284 | void hci_request_setup(struct hci_dev *hdev) |
2285 | { | |
e68f072b | 2286 | INIT_WORK(&hdev->discov_update, discov_update); |
2e93e53b | 2287 | INIT_WORK(&hdev->bg_scan_update, bg_scan_update); |
01b1cb87 | 2288 | INIT_WORK(&hdev->scan_update, scan_update_work); |
53c0ba74 | 2289 | INIT_WORK(&hdev->connectable_update, connectable_update_work); |
aed1a885 | 2290 | INIT_WORK(&hdev->discoverable_update, discoverable_update_work); |
c366f555 | 2291 | INIT_DELAYED_WORK(&hdev->discov_off, discov_off); |
7c1fbed2 JH |
2292 | INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); |
2293 | INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); | |
f2252570 | 2294 | INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); |
5fc16cc4 JH |
2295 | } |
2296 | ||
2297 | void hci_request_cancel_all(struct hci_dev *hdev) | |
2298 | { | |
7df0f73e JH |
2299 | hci_req_sync_cancel(hdev, ENODEV); |
2300 | ||
e68f072b | 2301 | cancel_work_sync(&hdev->discov_update); |
2e93e53b | 2302 | cancel_work_sync(&hdev->bg_scan_update); |
01b1cb87 | 2303 | cancel_work_sync(&hdev->scan_update); |
53c0ba74 | 2304 | cancel_work_sync(&hdev->connectable_update); |
aed1a885 | 2305 | cancel_work_sync(&hdev->discoverable_update); |
c366f555 | 2306 | cancel_delayed_work_sync(&hdev->discov_off); |
7c1fbed2 JH |
2307 | cancel_delayed_work_sync(&hdev->le_scan_disable); |
2308 | cancel_delayed_work_sync(&hdev->le_scan_restart); | |
f2252570 JH |
2309 | |
2310 | if (hdev->adv_instance_timeout) { | |
2311 | cancel_delayed_work_sync(&hdev->adv_instance_expire); | |
2312 | hdev->adv_instance_timeout = 0; | |
2313 | } | |
5fc16cc4 | 2314 | } |