2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
95 static const u16 mgmt_events[] = {
96 MGMT_EV_CONTROLLER_ERROR,
98 MGMT_EV_INDEX_REMOVED,
100 MGMT_EV_CLASS_OF_DEV_CHANGED,
101 MGMT_EV_LOCAL_NAME_CHANGED,
102 MGMT_EV_NEW_LINK_KEY,
103 MGMT_EV_NEW_LONG_TERM_KEY,
104 MGMT_EV_DEVICE_CONNECTED,
105 MGMT_EV_DEVICE_DISCONNECTED,
106 MGMT_EV_CONNECT_FAILED,
107 MGMT_EV_PIN_CODE_REQUEST,
108 MGMT_EV_USER_CONFIRM_REQUEST,
109 MGMT_EV_USER_PASSKEY_REQUEST,
111 MGMT_EV_DEVICE_FOUND,
113 MGMT_EV_DEVICE_BLOCKED,
114 MGMT_EV_DEVICE_UNBLOCKED,
115 MGMT_EV_DEVICE_UNPAIRED,
116 MGMT_EV_PASSKEY_NOTIFY,
119 MGMT_EV_DEVICE_ADDED,
120 MGMT_EV_DEVICE_REMOVED,
121 MGMT_EV_NEW_CONN_PARAM,
122 MGMT_EV_UNCONF_INDEX_ADDED,
123 MGMT_EV_UNCONF_INDEX_REMOVED,
126 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
128 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
129 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
132 struct list_head list;
140 /* HCI to MGMT error code conversion table */
141 static u8 mgmt_status_table[] = {
143 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
144 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
145 MGMT_STATUS_FAILED, /* Hardware Failure */
146 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
147 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
148 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
149 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
150 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
151 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
153 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
154 MGMT_STATUS_BUSY, /* Command Disallowed */
155 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
156 MGMT_STATUS_REJECTED, /* Rejected Security */
157 MGMT_STATUS_REJECTED, /* Rejected Personal */
158 MGMT_STATUS_TIMEOUT, /* Host Timeout */
159 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
160 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
161 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
162 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
163 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
164 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
165 MGMT_STATUS_BUSY, /* Repeated Attempts */
166 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
167 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
168 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
169 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
170 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
171 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
172 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
173 MGMT_STATUS_FAILED, /* Unspecified Error */
174 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
175 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
176 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
177 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
178 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
179 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
180 MGMT_STATUS_FAILED, /* Unit Link Key Used */
181 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
182 MGMT_STATUS_TIMEOUT, /* Instant Passed */
183 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
184 MGMT_STATUS_FAILED, /* Transaction Collision */
185 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
186 MGMT_STATUS_REJECTED, /* QoS Rejected */
187 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
188 MGMT_STATUS_REJECTED, /* Insufficient Security */
189 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
190 MGMT_STATUS_BUSY, /* Role Switch Pending */
191 MGMT_STATUS_FAILED, /* Slot Violation */
192 MGMT_STATUS_FAILED, /* Role Switch Failed */
193 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
194 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
195 MGMT_STATUS_BUSY, /* Host Busy Pairing */
196 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
197 MGMT_STATUS_BUSY, /* Controller Busy */
198 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
199 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
200 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
201 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
202 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
205 static u8 mgmt_status(u8 hci_status)
207 if (hci_status < ARRAY_SIZE(mgmt_status_table))
208 return mgmt_status_table[hci_status];
210 return MGMT_STATUS_FAILED;
213 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
216 struct mgmt_hdr *hdr;
217 struct mgmt_ev_cmd_status *ev;
220 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
222 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
226 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
229 hdr->index = cpu_to_le16(index);
230 hdr->len = cpu_to_le16(sizeof(*ev));
232 ev = (void *) skb_put(skb, sizeof(*ev));
234 ev->opcode = cpu_to_le16(cmd);
236 err = sock_queue_rcv_skb(sk, skb);
243 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
244 void *rp, size_t rp_len)
247 struct mgmt_hdr *hdr;
248 struct mgmt_ev_cmd_complete *ev;
251 BT_DBG("sock %p", sk);
253 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
257 hdr = (void *) skb_put(skb, sizeof(*hdr));
259 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
260 hdr->index = cpu_to_le16(index);
261 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
263 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
264 ev->opcode = cpu_to_le16(cmd);
268 memcpy(ev->data, rp, rp_len);
270 err = sock_queue_rcv_skb(sk, skb);
277 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
280 struct mgmt_rp_read_version rp;
282 BT_DBG("sock %p", sk);
284 rp.version = MGMT_VERSION;
285 rp.revision = cpu_to_le16(MGMT_REVISION);
287 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
291 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
294 struct mgmt_rp_read_commands *rp;
295 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
296 const u16 num_events = ARRAY_SIZE(mgmt_events);
301 BT_DBG("sock %p", sk);
303 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
305 rp = kmalloc(rp_size, GFP_KERNEL);
309 rp->num_commands = cpu_to_le16(num_commands);
310 rp->num_events = cpu_to_le16(num_events);
312 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
313 put_unaligned_le16(mgmt_commands[i], opcode);
315 for (i = 0; i < num_events; i++, opcode++)
316 put_unaligned_le16(mgmt_events[i], opcode);
318 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
325 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_index_list *rp;
334 BT_DBG("sock %p", sk);
336 read_lock(&hci_dev_list_lock);
339 list_for_each_entry(d, &hci_dev_list, list) {
340 if (d->dev_type == HCI_BREDR &&
341 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
345 rp_len = sizeof(*rp) + (2 * count);
346 rp = kmalloc(rp_len, GFP_ATOMIC);
348 read_unlock(&hci_dev_list_lock);
353 list_for_each_entry(d, &hci_dev_list, list) {
354 if (test_bit(HCI_SETUP, &d->dev_flags) ||
355 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
358 /* Devices marked as raw-only are neither configured
359 * nor unconfigured controllers.
361 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
364 if (d->dev_type == HCI_BREDR &&
365 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
366 rp->index[count++] = cpu_to_le16(d->id);
367 BT_DBG("Added hci%u", d->id);
371 rp->num_controllers = cpu_to_le16(count);
372 rp_len = sizeof(*rp) + (2 * count);
374 read_unlock(&hci_dev_list_lock);
376 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
384 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
385 void *data, u16 data_len)
387 struct mgmt_rp_read_unconf_index_list *rp;
393 BT_DBG("sock %p", sk);
395 read_lock(&hci_dev_list_lock);
398 list_for_each_entry(d, &hci_dev_list, list) {
399 if (d->dev_type == HCI_BREDR &&
400 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
404 rp_len = sizeof(*rp) + (2 * count);
405 rp = kmalloc(rp_len, GFP_ATOMIC);
407 read_unlock(&hci_dev_list_lock);
412 list_for_each_entry(d, &hci_dev_list, list) {
413 if (test_bit(HCI_SETUP, &d->dev_flags) ||
414 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
417 /* Devices marked as raw-only are neither configured
418 * nor unconfigured controllers.
420 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
423 if (d->dev_type == HCI_BREDR &&
424 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
425 rp->index[count++] = cpu_to_le16(d->id);
426 BT_DBG("Added hci%u", d->id);
430 rp->num_controllers = cpu_to_le16(count);
431 rp_len = sizeof(*rp) + (2 * count);
433 read_unlock(&hci_dev_list_lock);
435 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
443 static u32 get_supported_settings(struct hci_dev *hdev)
447 settings |= MGMT_SETTING_POWERED;
448 settings |= MGMT_SETTING_PAIRABLE;
449 settings |= MGMT_SETTING_DEBUG_KEYS;
451 if (lmp_bredr_capable(hdev)) {
452 settings |= MGMT_SETTING_CONNECTABLE;
453 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
454 settings |= MGMT_SETTING_FAST_CONNECTABLE;
455 settings |= MGMT_SETTING_DISCOVERABLE;
456 settings |= MGMT_SETTING_BREDR;
457 settings |= MGMT_SETTING_LINK_SECURITY;
459 if (lmp_ssp_capable(hdev)) {
460 settings |= MGMT_SETTING_SSP;
461 settings |= MGMT_SETTING_HS;
464 if (lmp_sc_capable(hdev) ||
465 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
466 settings |= MGMT_SETTING_SECURE_CONN;
469 if (lmp_le_capable(hdev)) {
470 settings |= MGMT_SETTING_LE;
471 settings |= MGMT_SETTING_ADVERTISING;
472 settings |= MGMT_SETTING_PRIVACY;
478 static u32 get_current_settings(struct hci_dev *hdev)
482 if (hdev_is_powered(hdev))
483 settings |= MGMT_SETTING_POWERED;
485 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
486 settings |= MGMT_SETTING_CONNECTABLE;
488 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
489 settings |= MGMT_SETTING_FAST_CONNECTABLE;
491 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
492 settings |= MGMT_SETTING_DISCOVERABLE;
494 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
495 settings |= MGMT_SETTING_PAIRABLE;
497 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
498 settings |= MGMT_SETTING_BREDR;
500 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
501 settings |= MGMT_SETTING_LE;
503 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
504 settings |= MGMT_SETTING_LINK_SECURITY;
506 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
507 settings |= MGMT_SETTING_SSP;
509 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
510 settings |= MGMT_SETTING_HS;
512 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
513 settings |= MGMT_SETTING_ADVERTISING;
515 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
516 settings |= MGMT_SETTING_SECURE_CONN;
518 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
519 settings |= MGMT_SETTING_DEBUG_KEYS;
521 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
522 settings |= MGMT_SETTING_PRIVACY;
527 #define PNP_INFO_SVCLASS_ID 0x1200
529 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
531 u8 *ptr = data, *uuids_start = NULL;
532 struct bt_uuid *uuid;
537 list_for_each_entry(uuid, &hdev->uuids, list) {
540 if (uuid->size != 16)
543 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
547 if (uuid16 == PNP_INFO_SVCLASS_ID)
553 uuids_start[1] = EIR_UUID16_ALL;
557 /* Stop if not enough space to put next UUID */
558 if ((ptr - data) + sizeof(u16) > len) {
559 uuids_start[1] = EIR_UUID16_SOME;
563 *ptr++ = (uuid16 & 0x00ff);
564 *ptr++ = (uuid16 & 0xff00) >> 8;
565 uuids_start[0] += sizeof(uuid16);
571 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
573 u8 *ptr = data, *uuids_start = NULL;
574 struct bt_uuid *uuid;
579 list_for_each_entry(uuid, &hdev->uuids, list) {
580 if (uuid->size != 32)
586 uuids_start[1] = EIR_UUID32_ALL;
590 /* Stop if not enough space to put next UUID */
591 if ((ptr - data) + sizeof(u32) > len) {
592 uuids_start[1] = EIR_UUID32_SOME;
596 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
598 uuids_start[0] += sizeof(u32);
604 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
606 u8 *ptr = data, *uuids_start = NULL;
607 struct bt_uuid *uuid;
612 list_for_each_entry(uuid, &hdev->uuids, list) {
613 if (uuid->size != 128)
619 uuids_start[1] = EIR_UUID128_ALL;
623 /* Stop if not enough space to put next UUID */
624 if ((ptr - data) + 16 > len) {
625 uuids_start[1] = EIR_UUID128_SOME;
629 memcpy(ptr, uuid->uuid, 16);
631 uuids_start[0] += 16;
637 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
639 struct pending_cmd *cmd;
641 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
642 if (cmd->opcode == opcode)
649 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
650 struct hci_dev *hdev,
653 struct pending_cmd *cmd;
655 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
656 if (cmd->user_data != data)
658 if (cmd->opcode == opcode)
665 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
670 name_len = strlen(hdev->dev_name);
672 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
674 if (name_len > max_len) {
676 ptr[1] = EIR_NAME_SHORT;
678 ptr[1] = EIR_NAME_COMPLETE;
680 ptr[0] = name_len + 1;
682 memcpy(ptr + 2, hdev->dev_name, name_len);
684 ad_len += (name_len + 2);
685 ptr += (name_len + 2);
691 static void update_scan_rsp_data(struct hci_request *req)
693 struct hci_dev *hdev = req->hdev;
694 struct hci_cp_le_set_scan_rsp_data cp;
697 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
700 memset(&cp, 0, sizeof(cp));
702 len = create_scan_rsp_data(hdev, cp.data);
704 if (hdev->scan_rsp_data_len == len &&
705 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
708 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
709 hdev->scan_rsp_data_len = len;
713 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
716 static u8 get_adv_discov_flags(struct hci_dev *hdev)
718 struct pending_cmd *cmd;
720 /* If there's a pending mgmt command the flags will not yet have
721 * their final values, so check for this first.
723 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
725 struct mgmt_mode *cp = cmd->param;
727 return LE_AD_GENERAL;
728 else if (cp->val == 0x02)
729 return LE_AD_LIMITED;
731 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
732 return LE_AD_LIMITED;
733 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
734 return LE_AD_GENERAL;
740 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
742 u8 ad_len = 0, flags = 0;
744 flags |= get_adv_discov_flags(hdev);
746 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
747 flags |= LE_AD_NO_BREDR;
750 BT_DBG("adv flags 0x%02x", flags);
760 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
762 ptr[1] = EIR_TX_POWER;
763 ptr[2] = (u8) hdev->adv_tx_power;
772 static void update_adv_data(struct hci_request *req)
774 struct hci_dev *hdev = req->hdev;
775 struct hci_cp_le_set_adv_data cp;
778 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
781 memset(&cp, 0, sizeof(cp));
783 len = create_adv_data(hdev, cp.data);
785 if (hdev->adv_data_len == len &&
786 memcmp(cp.data, hdev->adv_data, len) == 0)
789 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
790 hdev->adv_data_len = len;
794 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
797 static void create_eir(struct hci_dev *hdev, u8 *data)
802 name_len = strlen(hdev->dev_name);
808 ptr[1] = EIR_NAME_SHORT;
810 ptr[1] = EIR_NAME_COMPLETE;
812 /* EIR Data length */
813 ptr[0] = name_len + 1;
815 memcpy(ptr + 2, hdev->dev_name, name_len);
817 ptr += (name_len + 2);
820 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
822 ptr[1] = EIR_TX_POWER;
823 ptr[2] = (u8) hdev->inq_tx_power;
828 if (hdev->devid_source > 0) {
830 ptr[1] = EIR_DEVICE_ID;
832 put_unaligned_le16(hdev->devid_source, ptr + 2);
833 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
834 put_unaligned_le16(hdev->devid_product, ptr + 6);
835 put_unaligned_le16(hdev->devid_version, ptr + 8);
840 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
841 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
842 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
845 static void update_eir(struct hci_request *req)
847 struct hci_dev *hdev = req->hdev;
848 struct hci_cp_write_eir cp;
850 if (!hdev_is_powered(hdev))
853 if (!lmp_ext_inq_capable(hdev))
856 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
859 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
862 memset(&cp, 0, sizeof(cp));
864 create_eir(hdev, cp.data);
866 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
869 memcpy(hdev->eir, cp.data, sizeof(cp.data));
871 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
874 static u8 get_service_classes(struct hci_dev *hdev)
876 struct bt_uuid *uuid;
879 list_for_each_entry(uuid, &hdev->uuids, list)
880 val |= uuid->svc_hint;
885 static void update_class(struct hci_request *req)
887 struct hci_dev *hdev = req->hdev;
890 BT_DBG("%s", hdev->name);
892 if (!hdev_is_powered(hdev))
895 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
898 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
901 cod[0] = hdev->minor_class;
902 cod[1] = hdev->major_class;
903 cod[2] = get_service_classes(hdev);
905 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
908 if (memcmp(cod, hdev->dev_class, 3) == 0)
911 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
914 static bool get_connectable(struct hci_dev *hdev)
916 struct pending_cmd *cmd;
918 /* If there's a pending mgmt command the flag will not yet have
919 * it's final value, so check for this first.
921 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
923 struct mgmt_mode *cp = cmd->param;
927 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
930 static void enable_advertising(struct hci_request *req)
932 struct hci_dev *hdev = req->hdev;
933 struct hci_cp_le_set_adv_param cp;
934 u8 own_addr_type, enable = 0x01;
937 /* Clear the HCI_ADVERTISING bit temporarily so that the
938 * hci_update_random_address knows that it's safe to go ahead
939 * and write a new random address. The flag will be set back on
940 * as soon as the SET_ADV_ENABLE HCI command completes.
942 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
944 connectable = get_connectable(hdev);
946 /* Set require_privacy to true only when non-connectable
947 * advertising is used. In that case it is fine to use a
948 * non-resolvable private address.
950 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
953 memset(&cp, 0, sizeof(cp));
954 cp.min_interval = cpu_to_le16(0x0800);
955 cp.max_interval = cpu_to_le16(0x0800);
956 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
957 cp.own_address_type = own_addr_type;
958 cp.channel_map = hdev->le_adv_channel_map;
960 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
962 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
965 static void disable_advertising(struct hci_request *req)
969 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
972 static void service_cache_off(struct work_struct *work)
974 struct hci_dev *hdev = container_of(work, struct hci_dev,
976 struct hci_request req;
978 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
981 hci_req_init(&req, hdev);
988 hci_dev_unlock(hdev);
990 hci_req_run(&req, NULL);
993 static void rpa_expired(struct work_struct *work)
995 struct hci_dev *hdev = container_of(work, struct hci_dev,
997 struct hci_request req;
1001 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1003 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
1004 hci_conn_num(hdev, LE_LINK) > 0)
1007 /* The generation of a new RPA and programming it into the
1008 * controller happens in the enable_advertising() function.
1011 hci_req_init(&req, hdev);
1013 disable_advertising(&req);
1014 enable_advertising(&req);
1016 hci_req_run(&req, NULL);
1019 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1021 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1024 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1025 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1027 /* Non-mgmt controlled devices get this bit set
1028 * implicitly so that pairing works for them, however
1029 * for mgmt we require user-space to explicitly enable
1032 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1035 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1036 void *data, u16 data_len)
1038 struct mgmt_rp_read_info rp;
1040 BT_DBG("sock %p %s", sk, hdev->name);
1044 memset(&rp, 0, sizeof(rp));
1046 bacpy(&rp.bdaddr, &hdev->bdaddr);
1048 rp.version = hdev->hci_ver;
1049 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1051 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1052 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1054 memcpy(rp.dev_class, hdev->dev_class, 3);
1056 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1057 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1059 hci_dev_unlock(hdev);
1061 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1065 static void mgmt_pending_free(struct pending_cmd *cmd)
1072 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1073 struct hci_dev *hdev, void *data,
1076 struct pending_cmd *cmd;
1078 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1082 cmd->opcode = opcode;
1083 cmd->index = hdev->id;
1085 cmd->param = kmalloc(len, GFP_KERNEL);
1092 memcpy(cmd->param, data, len);
1097 list_add(&cmd->list, &hdev->mgmt_pending);
1102 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1103 void (*cb)(struct pending_cmd *cmd,
1107 struct pending_cmd *cmd, *tmp;
1109 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1110 if (opcode > 0 && cmd->opcode != opcode)
1117 static void mgmt_pending_remove(struct pending_cmd *cmd)
1119 list_del(&cmd->list);
1120 mgmt_pending_free(cmd);
1123 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1125 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1127 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1131 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1133 BT_DBG("%s status 0x%02x", hdev->name, status);
1135 if (hci_conn_count(hdev) == 0) {
1136 cancel_delayed_work(&hdev->power_off);
1137 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1141 static void hci_stop_discovery(struct hci_request *req)
1143 struct hci_dev *hdev = req->hdev;
1144 struct hci_cp_remote_name_req_cancel cp;
1145 struct inquiry_entry *e;
1147 switch (hdev->discovery.state) {
1148 case DISCOVERY_FINDING:
1149 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1150 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1152 cancel_delayed_work(&hdev->le_scan_disable);
1153 hci_req_add_le_scan_disable(req);
1158 case DISCOVERY_RESOLVING:
1159 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1164 bacpy(&cp.bdaddr, &e->data.bdaddr);
1165 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1171 /* Passive scanning */
1172 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1173 hci_req_add_le_scan_disable(req);
1178 static int clean_up_hci_state(struct hci_dev *hdev)
1180 struct hci_request req;
1181 struct hci_conn *conn;
1183 hci_req_init(&req, hdev);
1185 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1186 test_bit(HCI_PSCAN, &hdev->flags)) {
1188 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1191 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1192 disable_advertising(&req);
1194 hci_stop_discovery(&req);
1196 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1197 struct hci_cp_disconnect dc;
1198 struct hci_cp_reject_conn_req rej;
1200 switch (conn->state) {
1203 dc.handle = cpu_to_le16(conn->handle);
1204 dc.reason = 0x15; /* Terminated due to Power Off */
1205 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1208 if (conn->type == LE_LINK)
1209 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1211 else if (conn->type == ACL_LINK)
1212 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1216 bacpy(&rej.bdaddr, &conn->dst);
1217 rej.reason = 0x15; /* Terminated due to Power Off */
1218 if (conn->type == ACL_LINK)
1219 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1221 else if (conn->type == SCO_LINK)
1222 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1228 return hci_req_run(&req, clean_up_hci_complete);
1231 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1234 struct mgmt_mode *cp = data;
1235 struct pending_cmd *cmd;
1238 BT_DBG("request for %s", hdev->name);
1240 if (cp->val != 0x00 && cp->val != 0x01)
1241 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1242 MGMT_STATUS_INVALID_PARAMS);
1246 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1247 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1252 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1253 cancel_delayed_work(&hdev->power_off);
1256 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1258 err = mgmt_powered(hdev, 1);
1263 if (!!cp->val == hdev_is_powered(hdev)) {
1264 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1268 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1275 queue_work(hdev->req_workqueue, &hdev->power_on);
1278 /* Disconnect connections, stop scans, etc */
1279 err = clean_up_hci_state(hdev);
1281 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1282 HCI_POWER_OFF_TIMEOUT);
1284 /* ENODATA means there were no HCI commands queued */
1285 if (err == -ENODATA) {
1286 cancel_delayed_work(&hdev->power_off);
1287 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1293 hci_dev_unlock(hdev);
1297 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1298 struct sock *skip_sk)
1300 struct sk_buff *skb;
1301 struct mgmt_hdr *hdr;
1303 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1307 hdr = (void *) skb_put(skb, sizeof(*hdr));
1308 hdr->opcode = cpu_to_le16(event);
1310 hdr->index = cpu_to_le16(hdev->id);
1312 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1313 hdr->len = cpu_to_le16(data_len);
1316 memcpy(skb_put(skb, data_len), data, data_len);
1319 __net_timestamp(skb);
1321 hci_send_to_control(skb, skip_sk);
1327 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1331 ev = cpu_to_le32(get_current_settings(hdev));
1333 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1338 struct hci_dev *hdev;
1342 static void settings_rsp(struct pending_cmd *cmd, void *data)
1344 struct cmd_lookup *match = data;
1346 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1348 list_del(&cmd->list);
1350 if (match->sk == NULL) {
1351 match->sk = cmd->sk;
1352 sock_hold(match->sk);
1355 mgmt_pending_free(cmd);
1358 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1362 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1363 mgmt_pending_remove(cmd);
1366 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1368 if (!lmp_bredr_capable(hdev))
1369 return MGMT_STATUS_NOT_SUPPORTED;
1370 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1371 return MGMT_STATUS_REJECTED;
1373 return MGMT_STATUS_SUCCESS;
1376 static u8 mgmt_le_support(struct hci_dev *hdev)
1378 if (!lmp_le_capable(hdev))
1379 return MGMT_STATUS_NOT_SUPPORTED;
1380 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1381 return MGMT_STATUS_REJECTED;
1383 return MGMT_STATUS_SUCCESS;
1386 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1388 struct pending_cmd *cmd;
1389 struct mgmt_mode *cp;
1390 struct hci_request req;
1393 BT_DBG("status 0x%02x", status);
1397 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1402 u8 mgmt_err = mgmt_status(status);
1403 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1404 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1410 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1413 if (hdev->discov_timeout > 0) {
1414 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1415 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1419 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1423 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1426 new_settings(hdev, cmd->sk);
1428 /* When the discoverable mode gets changed, make sure
1429 * that class of device has the limited discoverable
1430 * bit correctly set.
1432 hci_req_init(&req, hdev);
1434 hci_req_run(&req, NULL);
1437 mgmt_pending_remove(cmd);
1440 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.  cp->val: 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable (requires a timeout).
 * Validates parameters, handles the powered-off / no-HCI-change fast
 * paths, then builds an HCI request (IAC LAP + scan enable for BR/EDR,
 * advertising data for LE-only).  (Listing elided: some statements and
 * braces are not visible here.)
 */
1443 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1446 struct mgmt_cp_set_discoverable *cp = data;
1447 struct pending_cmd *cmd;
1448 struct hci_request req;
1453 BT_DBG("request for %s", hdev->name);
/* Requires at least one of BR/EDR or LE to be enabled */
1455 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1456 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1457 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1458 MGMT_STATUS_REJECTED);
1460 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1461 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1462 MGMT_STATUS_INVALID_PARAMS);
1464 timeout = __le16_to_cpu(cp->timeout);
1466 /* Disabling discoverable requires that no timeout is set,
1467 * and enabling limited discoverable requires a timeout.
1469 if ((cp->val == 0x00 && timeout > 0) ||
1470 (cp->val == 0x02 && timeout == 0))
1471 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1472 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense while powered */
1476 if (!hdev_is_powered(hdev) && timeout > 0) {
1477 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1478 MGMT_STATUS_NOT_POWERED);
/* Discoverable/connectable changes must be serialized */
1482 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1483 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1484 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1489 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1490 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1491 MGMT_STATUS_REJECTED);
/* Powered off: only toggle the setting flag, no HCI traffic */
1495 if (!hdev_is_powered(hdev)) {
1496 bool changed = false;
1498 /* Setting limited discoverable when powered off is
1499 * not a valid operation since it requires a timeout
1500 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1502 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1503 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1507 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1512 err = new_settings(hdev, sk);
1517 /* If the current mode is the same, then just update the timeout
1518 * value with the new value. And if only the timeout gets updated,
1519 * then no need for any HCI transactions.
1521 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1522 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1523 &hdev->dev_flags)) {
1524 cancel_delayed_work(&hdev->discov_off);
1525 hdev->discov_timeout = timeout;
1527 if (cp->val && hdev->discov_timeout > 0) {
1528 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1529 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1533 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1537 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1543 /* Cancel any potential discoverable timeout that might be
1544 * still active and store new timeout value. The arming of
1545 * the timeout happens in the complete handler.
1547 cancel_delayed_work(&hdev->discov_off);
1548 hdev->discov_timeout = timeout;
1550 /* Limited discoverable mode */
1551 if (cp->val == 0x02)
1552 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1554 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1556 hci_req_init(&req, hdev);
1558 /* The procedure for LE-only controllers is much simpler - just
1559 * update the advertising data.
1561 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1567 struct hci_cp_write_current_iac_lap hci_cp;
1569 if (cp->val == 0x02) {
1570 /* Limited discoverable mode */
1571 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC (0x9e8b00) and GIAC (0x9e8b33) LAPs, little-endian byte order */
1572 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1573 hci_cp.iac_lap[1] = 0x8b;
1574 hci_cp.iac_lap[2] = 0x9e;
1575 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1576 hci_cp.iac_lap[4] = 0x8b;
1577 hci_cp.iac_lap[5] = 0x9e;
1579 /* General discoverable mode */
1581 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1582 hci_cp.iac_lap[1] = 0x8b;
1583 hci_cp.iac_lap[2] = 0x9e;
/* Command length: 1 byte num_iac + 3 bytes per LAP */
1586 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1587 (hci_cp.num_iac * 3) + 1, &hci_cp);
1589 scan |= SCAN_INQUIRY;
1591 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1594 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1597 update_adv_data(&req);
1599 err = hci_req_run(&req, set_discoverable_complete);
1601 mgmt_pending_remove(cmd);
1604 hci_dev_unlock(hdev);
/* Queue HCI commands adjusting page scan activity/type for "fast
 * connectable" mode.  BR/EDR-only and requires controller >= 1.2; the
 * commands are only added when the values actually change.  (Listing
 * elided: some statements are not visible here.)
 */
1608 static void write_fast_connectable(struct hci_request *req, bool enable)
1610 struct hci_dev *hdev = req->hdev;
1611 struct hci_cp_write_page_scan_activity acp;
1614 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1617 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1621 type = PAGE_SCAN_TYPE_INTERLACED;
1623 /* 160 msec page scan interval */
1624 acp.interval = cpu_to_le16(0x0100);
1626 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1628 /* default 1.28 sec page scan */
1629 acp.interval = cpu_to_le16(0x0800);
/* 11.25 msec page scan window in both modes */
1632 acp.window = cpu_to_le16(0x0012);
/* Only send commands when the cached values differ */
1634 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1635 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1636 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1639 if (hdev->page_scan_type != type)
1640 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* Completion callback for the SET_CONNECTABLE HCI request: resolves the
 * pending mgmt command, updates HCI_CONNECTABLE and notifies listeners
 * when the setting actually changed.  (Listing elided.)
 */
1643 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1645 struct pending_cmd *cmd;
1646 struct mgmt_mode *cp;
1649 BT_DBG("status 0x%02x", status);
1653 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1658 u8 mgmt_err = mgmt_status(status);
1659 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1665 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1667 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1669 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1672 new_settings(hdev, cmd->sk);
1675 mgmt_pending_remove(cmd);
1678 hci_dev_unlock(hdev);
/* Flag-only connectable update (no HCI traffic), used when powered off
 * or when the request results in no controller change.  Disabling
 * connectable also clears discoverable.  Returns the settings-response /
 * new_settings result.  (Listing elided.)
 */
1681 static int set_connectable_update_settings(struct hci_dev *hdev,
1682 struct sock *sk, u8 val)
1684 bool changed = false;
1687 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1691 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1693 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable */
1694 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1697 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1702 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler.  Validates the mode value, takes the
 * flag-only path when powered off, otherwise builds an HCI request to
 * update page scan (BR/EDR) or advertising (LE-only).  (Listing elided:
 * some statements and braces are not visible here.)
 */
1707 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1710 struct mgmt_mode *cp = data;
1711 struct pending_cmd *cmd;
1712 struct hci_request req;
1716 BT_DBG("request for %s", hdev->name);
1718 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1719 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1720 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1721 MGMT_STATUS_REJECTED);
1723 if (cp->val != 0x00 && cp->val != 0x01)
1724 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1725 MGMT_STATUS_INVALID_PARAMS);
1729 if (!hdev_is_powered(hdev)) {
1730 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Serialize against in-flight discoverable/connectable changes */
1734 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1735 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1736 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1741 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1747 hci_req_init(&req, hdev);
1749 /* If BR/EDR is not enabled and we disable advertising as a
1750 * by-product of disabling connectable, we need to update the
1751 * advertising flags.
1753 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1755 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1756 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1758 update_adv_data(&req);
1759 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Turning page scan off: stop any running discoverable timeout */
1765 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1766 hdev->discov_timeout > 0)
1767 cancel_delayed_work(&hdev->discov_off);
1770 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1773 /* If we're going from non-connectable to connectable or
1774 * vice-versa when fast connectable is enabled ensure that fast
1775 * connectable gets disabled. write_fast_connectable won't do
1776 * anything if the page scan parameters are already what they
1779 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1780 write_fast_connectable(&req, false);
/* Restart advertising so it picks up the new connectable mode, but
 * only when no LE links would be disturbed */
1782 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1783 hci_conn_num(hdev, LE_LINK) == 0) {
1784 disable_advertising(&req);
1785 enable_advertising(&req);
1788 err = hci_req_run(&req, set_connectable_complete);
1790 mgmt_pending_remove(cmd);
/* -ENODATA: nothing was queued, fall back to flag-only update */
1791 if (err == -ENODATA)
1792 err = set_connectable_update_settings(hdev, sk,
1798 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure host-side flag toggle, no HCI
 * traffic needed.  (Listing elided.)
 */
1802 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1805 struct mgmt_mode *cp = data;
1809 BT_DBG("request for %s", hdev->name);
1811 if (cp->val != 0x00 && cp->val != 0x01)
1812 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1813 MGMT_STATUS_INVALID_PARAMS);
1818 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1820 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1822 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1827 err = new_settings(hdev, sk);
1830 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: toggles BR/EDR authentication via
 * HCI Write_Authentication_Enable; flag-only path when powered off.
 * (Listing elided.)
 */
1834 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1837 struct mgmt_mode *cp = data;
1838 struct pending_cmd *cmd;
1842 BT_DBG("request for %s", hdev->name);
1844 status = mgmt_bredr_support(hdev);
1846 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1849 if (cp->val != 0x00 && cp->val != 0x01)
1850 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1851 MGMT_STATUS_INVALID_PARAMS);
1855 if (!hdev_is_powered(hdev)) {
1856 bool changed = false;
1858 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1859 &hdev->dev_flags)) {
1860 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1869 err = new_settings(hdev, sk);
1874 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1875 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: respond directly */
1882 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1883 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1893 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1895 mgmt_pending_remove(cmd);
1900 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: toggles Secure Simple Pairing via HCI
 * Write_Simple_Pairing_Mode.  Disabling SSP also clears high-speed
 * support since HS depends on SSP.  (Listing elided.)
 */
1904 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1906 struct mgmt_mode *cp = data;
1907 struct pending_cmd *cmd;
1911 BT_DBG("request for %s", hdev->name);
1913 status = mgmt_bredr_support(hdev);
1915 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1917 if (!lmp_ssp_capable(hdev))
1918 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1919 MGMT_STATUS_NOT_SUPPORTED);
1921 if (cp->val != 0x00 && cp->val != 0x01)
1922 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1923 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only updates, HS cleared together with SSP */
1927 if (!hdev_is_powered(hdev)) {
1931 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1934 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1937 changed = test_and_clear_bit(HCI_HS_ENABLED,
1940 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1943 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1948 err = new_settings(hdev, sk);
1953 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1954 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1955 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1960 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1961 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1965 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Disabling SSP also turns off SSP debug mode when it was in use */
1971 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
1972 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1973 sizeof(cp->val), &cp->val);
1975 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1977 mgmt_pending_remove(cmd);
1982 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: toggles the high-speed (AMP) host flag.
 * Requires SSP to be enabled; purely a host-side flag, no HCI command.
 * (Listing elided.)
 */
1986 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1988 struct mgmt_mode *cp = data;
1993 BT_DBG("request for %s", hdev->name);
1995 status = mgmt_bredr_support(hdev);
1997 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1999 if (!lmp_ssp_capable(hdev))
2000 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2001 MGMT_STATUS_NOT_SUPPORTED);
2003 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2004 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2005 MGMT_STATUS_REJECTED);
2007 if (cp->val != 0x00 && cp->val != 0x01)
2008 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2009 MGMT_STATUS_INVALID_PARAMS);
2014 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2016 if (hdev_is_powered(hdev)) {
2017 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2018 MGMT_STATUS_REJECTED);
2022 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2025 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2030 err = new_settings(hdev, sk);
2033 hci_dev_unlock(hdev);
/* Completion callback for the SET_LE HCI request: answers all pending
 * SET_LE commands and, on a successful enable, refreshes advertising and
 * scan response data.  (Listing elided.)
 */
2037 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2039 struct cmd_lookup match = { NULL, hdev };
2042 u8 mgmt_err = mgmt_status(status);
2044 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2049 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2051 new_settings(hdev, match.sk);
2056 /* Make sure the controller has a good default for
2057 * advertising data. Restrict the update to when LE
2058 * has actually been enabled. During power on, the
2059 * update in powered_update_hci will take care of it.
2061 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2062 struct hci_request req;
2066 hci_req_init(&req, hdev);
2067 update_adv_data(&req);
2068 update_scan_rsp_data(&req);
2069 hci_req_run(&req, NULL);
2071 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: toggles LE host support via HCI
 * Write_LE_Host_Supported.  Flag-only path when powered off or already
 * in the requested host state; LE-only controllers reject the toggle.
 * (Listing elided.)
 */
2075 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2077 struct mgmt_mode *cp = data;
2078 struct hci_cp_write_le_host_supported hci_cp;
2079 struct pending_cmd *cmd;
2080 struct hci_request req;
2084 BT_DBG("request for %s", hdev->name);
2086 if (!lmp_le_capable(hdev))
2087 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2088 MGMT_STATUS_NOT_SUPPORTED);
2090 if (cp->val != 0x00 && cp->val != 0x01)
2091 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2092 MGMT_STATUS_INVALID_PARAMS);
2094 /* LE-only devices do not allow toggling LE on/off */
2095 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2096 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2097 MGMT_STATUS_REJECTED);
2102 enabled = lmp_host_le_capable(hdev);
2104 if (!hdev_is_powered(hdev) || val == enabled) {
2105 bool changed = false;
2107 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2108 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implies advertising is off as well */
2112 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2113 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2117 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2122 err = new_settings(hdev, sk);
2127 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2128 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2129 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2134 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2140 hci_req_init(&req, hdev);
2142 memset(&hci_cp, 0, sizeof(hci_cp));
2146 hci_cp.simul = lmp_le_br_capable(hdev);
2148 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2149 disable_advertising(&req);
2152 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2155 err = hci_req_run(&req, le_enable_complete);
2157 mgmt_pending_remove(cmd);
2160 hci_dev_unlock(hdev);
2164 /* This is a helper function to test for pending mgmt commands that can
2165 * cause CoD or EIR HCI commands. We can only allow one such pending
2166 * mgmt command at a time since otherwise we cannot easily track what
2167 * the current values are, will be, and based on that calculate if a new
2168 * HCI command needs to be sent and if yes with what value.
2170 static bool pending_eir_or_class(struct hci_dev *hdev)
2172 struct pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR */
2174 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2175 switch (cmd->opcode) {
2176 case MGMT_OP_ADD_UUID:
2177 case MGMT_OP_REMOVE_UUID:
2178 case MGMT_OP_SET_DEV_CLASS:
2179 case MGMT_OP_SET_POWERED:
/* NOTE(review): listing elided — the return statements for the match
 * and no-match cases are not visible here. */
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order, used to detect 16/32-bit shortened UUIDs. */
2187 static const u8 bluetooth_base_uuid[] = {
2188 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2189 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Return the effective size of a 128-bit UUID: if the low 12 bytes match
 * the base UUID, the value portion at offset 12 decides the shortened
 * form.  (Listing elided: the return statements are not visible.) */
2192 static u8 get_uuid_size(const u8 *uuid)
2196 if (memcmp(uuid, bluetooth_base_uuid, 12))
2199 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class-of-device affecting commands:
 * answers the pending command for @mgmt_op with the current dev_class
 * and removes it.  (Listing elided.)
 */
2206 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2208 struct pending_cmd *cmd;
2212 cmd = mgmt_pending_find(mgmt_op, hdev);
2216 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2217 hdev->dev_class, 3);
2219 mgmt_pending_remove(cmd);
2222 hci_dev_unlock(hdev);
/* HCI request callback for ADD_UUID: delegate to the shared class
 * completion helper. */
2225 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2227 BT_DBG("status 0x%02x", status);
2229 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: stores the UUID on hdev->uuids and queues an
 * HCI request to refresh CoD/EIR; -ENODATA from hci_req_run means no HCI
 * update was needed and the command completes immediately.  (Listing
 * elided: error-path statements are not visible here.)
 */
2232 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2234 struct mgmt_cp_add_uuid *cp = data;
2235 struct pending_cmd *cmd;
2236 struct hci_request req;
2237 struct bt_uuid *uuid;
2240 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be in flight */
2244 if (pending_eir_or_class(hdev)) {
2245 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2250 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2256 memcpy(uuid->uuid, cp->uuid, 16);
2257 uuid->svc_hint = cp->svc_hint;
2258 uuid->size = get_uuid_size(cp->uuid);
2260 list_add_tail(&uuid->list, &hdev->uuids);
2262 hci_req_init(&req, hdev);
2267 err = hci_req_run(&req, add_uuid_complete);
2269 if (err != -ENODATA)
/* No HCI commands queued: reply right away with the current class */
2272 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2273 hdev->dev_class, 3);
2277 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2286 hci_dev_unlock(hdev);
/* Arm the service cache: schedules the cache timer when the device is
 * powered and the cache was not already active.  (Listing elided: the
 * return statements are not visible here.)
 */
2290 static bool enable_service_cache(struct hci_dev *hdev)
2292 if (!hdev_is_powered(hdev))
2295 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2296 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for REMOVE_UUID: delegate to the shared class
 * completion helper. */
2304 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2306 BT_DBG("status 0x%02x", status);
2308 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler.  An all-zero UUID clears the whole list;
 * otherwise matching entries are unlinked.  Queues a CoD/EIR refresh,
 * completing immediately on -ENODATA.  (Listing elided.)
 */
2311 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2314 struct mgmt_cp_remove_uuid *cp = data;
2315 struct pending_cmd *cmd;
2316 struct bt_uuid *match, *tmp;
2317 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2318 struct hci_request req;
2321 BT_DBG("request for %s", hdev->name);
2325 if (pending_eir_or_class(hdev)) {
2326 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard: drop every stored UUID and re-enable the service cache */
2331 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2332 hci_uuids_clear(hdev);
2334 if (enable_service_cache(hdev)) {
2335 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2336 0, hdev->dev_class, 3);
2345 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2346 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2349 list_del(&match->list);
/* Nothing matched the requested UUID */
2355 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2356 MGMT_STATUS_INVALID_PARAMS);
2361 hci_req_init(&req, hdev);
2366 err = hci_req_run(&req, remove_uuid_complete);
2368 if (err != -ENODATA)
2371 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2372 hdev->dev_class, 3);
2376 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2385 hci_dev_unlock(hdev);
/* HCI request callback for SET_DEV_CLASS: delegate to the shared class
 * completion helper. */
2389 static void set_class_complete(struct hci_dev *hdev, u8 status)
2391 BT_DBG("status 0x%02x", status);
2393 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: validates and stores major/minor class
 * bytes, then queues a class update; flushes a pending service cache
 * first.  (Listing elided.)
 */
2396 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2399 struct mgmt_cp_set_dev_class *cp = data;
2400 struct pending_cmd *cmd;
2401 struct hci_request req;
2404 BT_DBG("request for %s", hdev->name);
2406 if (!lmp_bredr_capable(hdev))
2407 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2408 MGMT_STATUS_NOT_SUPPORTED);
2412 if (pending_eir_or_class(hdev)) {
2413 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low two minor bits and high three major bits are reserved */
2418 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2419 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2420 MGMT_STATUS_INVALID_PARAMS);
2424 hdev->major_class = cp->major;
2425 hdev->minor_class = cp->minor;
2427 if (!hdev_is_powered(hdev)) {
2428 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2429 hdev->dev_class, 3);
2433 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the cache work to
 * avoid deadlocking against the work item */
2435 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2436 hci_dev_unlock(hdev);
2437 cancel_delayed_work_sync(&hdev->service_cache);
2444 err = hci_req_run(&req, set_class_complete);
2446 if (err != -ENODATA)
2449 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2450 hdev->dev_class, 3);
2454 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2463 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates the variable-length key list
 * (count bounded so the size computation cannot overflow u16), replaces
 * all stored BR/EDR link keys, and updates the keep-debug-keys flag.
 * (Listing elided.)
 */
2467 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2470 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound on key_count such that expected_len fits in u16 */
2471 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2472 sizeof(struct mgmt_link_key_info));
2473 u16 key_count, expected_len;
2477 BT_DBG("request for %s", hdev->name);
2479 if (!lmp_bredr_capable(hdev))
2480 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2481 MGMT_STATUS_NOT_SUPPORTED);
2483 key_count = __le16_to_cpu(cp->key_count);
2484 if (key_count > max_key_count) {
2485 BT_ERR("load_link_keys: too big key_count value %u",
2487 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2488 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly */
2491 expected_len = sizeof(*cp) + key_count *
2492 sizeof(struct mgmt_link_key_info);
2493 if (expected_len != len) {
2494 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2496 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2497 MGMT_STATUS_INVALID_PARAMS);
2500 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2501 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2502 MGMT_STATUS_INVALID_PARAMS);
2504 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the stored keys */
2507 for (i = 0; i < key_count; i++) {
2508 struct mgmt_link_key_info *key = &cp->keys[i];
2510 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2511 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2512 MGMT_STATUS_INVALID_PARAMS);
2517 hci_link_keys_clear(hdev);
2520 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2523 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2527 new_settings(hdev, NULL);
2529 for (i = 0; i < key_count; i++) {
2530 struct mgmt_link_key_info *key = &cp->keys[i];
2532 /* Always ignore debug keys and require a new pairing if
2533 * the user wants to use them.
2535 if (key->type == HCI_LK_DEBUG_COMBINATION)
2538 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2539 key->type, key->pin_len, NULL);
2542 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2544 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk. */
2549 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2550 u8 addr_type, struct sock *skip_sk)
2552 struct mgmt_ev_device_unpaired ev;
2554 bacpy(&ev.addr.bdaddr, bdaddr);
2555 ev.addr.type = addr_type;
2557 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes link keys (BR/EDR) or IRK/LTK
 * and connection params (LE) for the address, optionally disconnecting
 * an existing connection first.  (Listing elided: some branch bodies are
 * not visible here.)
 */
2561 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2564 struct mgmt_cp_unpair_device *cp = data;
2565 struct mgmt_rp_unpair_device rp;
2566 struct hci_cp_disconnect dc;
2567 struct pending_cmd *cmd;
2568 struct hci_conn *conn;
2571 memset(&rp, 0, sizeof(rp));
2572 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2573 rp.addr.type = cp->addr.type;
2575 if (!bdaddr_type_is_valid(cp->addr.type))
2576 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2577 MGMT_STATUS_INVALID_PARAMS,
2580 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2581 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2582 MGMT_STATUS_INVALID_PARAMS,
2587 if (!hdev_is_powered(hdev)) {
2588 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2589 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2593 if (cp->addr.type == BDADDR_BREDR) {
2594 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: map the mgmt address type to the HCI one, then drop IRK, any
 * stored connection parameters and the LTK */
2598 if (cp->addr.type == BDADDR_LE_PUBLIC)
2599 addr_type = ADDR_LE_DEV_PUBLIC;
2601 addr_type = ADDR_LE_DEV_RANDOM;
2603 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2605 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2607 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2611 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2612 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2616 if (cp->disconnect) {
2617 if (cp->addr.type == BDADDR_BREDR)
2618 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2630 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2634 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2641 dc.handle = cpu_to_le16(conn->handle);
2642 dc.reason = 0x13; /* Remote User Terminated Connection */
2643 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2645 mgmt_pending_remove(cmd);
2648 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: looks up the ACL or LE connection for the
 * given address and sends HCI_Disconnect with "remote user terminated".
 * (Listing elided.)
 */
2652 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2655 struct mgmt_cp_disconnect *cp = data;
2656 struct mgmt_rp_disconnect rp;
2657 struct hci_cp_disconnect dc;
2658 struct pending_cmd *cmd;
2659 struct hci_conn *conn;
2664 memset(&rp, 0, sizeof(rp));
2665 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2666 rp.addr.type = cp->addr.type;
2668 if (!bdaddr_type_is_valid(cp->addr.type))
2669 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2670 MGMT_STATUS_INVALID_PARAMS,
2675 if (!test_bit(HCI_UP, &hdev->flags)) {
2676 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2677 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect may be pending at a time */
2681 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2682 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2683 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2687 if (cp->addr.type == BDADDR_BREDR)
2688 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2691 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2693 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2694 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2695 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2699 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2705 dc.handle = cpu_to_le16(conn->handle);
2706 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2708 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2710 mgmt_pending_remove(cmd);
2713 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the mgmt BDADDR_* address
 * type used on the mgmt interface.  (Listing elided: some case labels
 * are not visible here.)
 */
2717 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2719 switch (link_type) {
2721 switch (addr_type) {
2722 case ADDR_LE_DEV_PUBLIC:
2723 return BDADDR_LE_PUBLIC;
2726 /* Fallback to LE Random address type */
2727 return BDADDR_LE_RANDOM;
2731 /* Fallback to BR/EDR type */
2732 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: counts mgmt-visible connections,
 * allocates a response sized for that count, then fills it while
 * skipping SCO/eSCO links (length is recomputed afterwards).  (Listing
 * elided.)
 */
2736 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2739 struct mgmt_rp_get_connections *rp;
2749 if (!hdev_is_powered(hdev)) {
2750 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2751 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections flagged as mgmt-connected */
2756 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2757 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2761 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2762 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address list */
2769 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2770 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2772 bacpy(&rp->addr[i].bdaddr, &c->dst);
2773 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2774 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2779 rp->conn_count = cpu_to_le16(i);
2781 /* Recalculate length in case of filtered SCO connections, etc */
2782 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2784 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2790 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the corresponding
 * HCI negative reply; the pending entry is removed on send failure.
 * (Listing elided.)
 */
2794 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2795 struct mgmt_cp_pin_code_neg_reply *cp)
2797 struct pending_cmd *cmd;
2800 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2805 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2806 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2808 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forwards the user's PIN to the
 * controller.  A high-security connection demands a full 16-byte PIN;
 * otherwise the reply is converted into a negative reply.  (Listing
 * elided.)
 */
2813 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2816 struct hci_conn *conn;
2817 struct mgmt_cp_pin_code_reply *cp = data;
2818 struct hci_cp_pin_code_reply reply;
2819 struct pending_cmd *cmd;
2826 if (!hdev_is_powered(hdev)) {
2827 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2828 MGMT_STATUS_NOT_POWERED);
2832 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2834 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2835 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-digit PIN; reject shorter ones by
 * sending a negative reply to the remote */
2839 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2840 struct mgmt_cp_pin_code_neg_reply ncp;
2842 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2844 BT_ERR("PIN code is not 16 bytes long");
2846 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2848 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2849 MGMT_STATUS_INVALID_PARAMS);
2854 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2860 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2861 reply.pin_len = cp->pin_len;
2862 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2864 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2866 mgmt_pending_remove(cmd);
2869 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates and stores the host IO
 * capability used for future pairings; no HCI traffic.  (Listing
 * elided.)
 */
2873 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2876 struct mgmt_cp_set_io_capability *cp = data;
2880 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2881 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2882 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2886 hdev->io_capability = cp->io_capability;
2888 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2889 hdev->io_capability);
2891 hci_dev_unlock(hdev);
2893 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is @conn, if any.
 * (Listing elided: the return statements are not visible here.)
 */
2897 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2899 struct hci_dev *hdev = conn->hdev;
2900 struct pending_cmd *cmd;
2902 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2903 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2906 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the completion, detach all pairing
 * callbacks from the connection, drop the connection reference and
 * remove the pending entry.  (Listing elided.)
 */
2915 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2917 struct mgmt_rp_pair_device rp;
2918 struct hci_conn *conn = cmd->user_data;
2920 bacpy(&rp.addr.bdaddr, &conn->dst);
2921 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2923 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2926 /* So we don't get further callbacks for this connection */
2927 conn->connect_cfm_cb = NULL;
2928 conn->security_cfm_cb = NULL;
2929 conn->disconn_cfm_cb = NULL;
2931 hci_conn_drop(conn);
2933 mgmt_pending_remove(cmd);
/* SMP completion entry point: resolve a pending pairing for @conn with
 * success or failure.  (Listing elided.)
 */
2936 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2938 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2939 struct pending_cmd *cmd;
2941 cmd = find_pairing(conn);
2943 pairing_complete(cmd, status);
/* Connection-event callback for BR/EDR pairings: map the HCI status to a
 * mgmt status and complete the pending pairing.  (Listing elided.)
 */
2946 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2948 struct pending_cmd *cmd;
2950 BT_DBG("status %u", status);
2952 cmd = find_pairing(conn);
2954 BT_DBG("Unable to find a pending command");
2956 pairing_complete(cmd, mgmt_status(status));
/* Connection-event callback for LE pairings, analogous to
 * pairing_complete_cb.  (Listing elided: an early-return condition
 * between the BT_DBG and find_pairing is not visible here.)
 */
2959 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2961 struct pending_cmd *cmd;
2963 BT_DBG("status %u", status);
2968 cmd = find_pairing(conn);
2970 BT_DBG("Unable to find a pending command");
2972 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiates an ACL or LE connection with
 * dedicated-bonding authentication, installs pairing callbacks on the
 * connection and tracks the request as a pending command.  (Listing
 * elided: some branch bodies are not visible here.)
 */
2975 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2978 struct mgmt_cp_pair_device *cp = data;
2979 struct mgmt_rp_pair_device rp;
2980 struct pending_cmd *cmd;
2981 u8 sec_level, auth_type;
2982 struct hci_conn *conn;
2987 memset(&rp, 0, sizeof(rp));
2988 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2989 rp.addr.type = cp->addr.type;
2991 if (!bdaddr_type_is_valid(cp->addr.type))
2992 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2993 MGMT_STATUS_INVALID_PARAMS,
2996 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2997 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2998 MGMT_STATUS_INVALID_PARAMS,
3003 if (!hdev_is_powered(hdev)) {
3004 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3005 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3009 sec_level = BT_SECURITY_MEDIUM;
3010 auth_type = HCI_AT_DEDICATED_BONDING;
3012 if (cp->addr.type == BDADDR_BREDR) {
3013 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3018 /* Convert from L2CAP channel address type to HCI address type
3020 if (cp->addr.type == BDADDR_LE_PUBLIC)
3021 addr_type = ADDR_LE_DEV_PUBLIC;
3023 addr_type = ADDR_LE_DEV_RANDOM;
3025 /* When pairing a new device, it is expected to remember
3026 * this device for future connections. Adding the connection
3027 * parameter information ahead of time allows tracking
3028 * of the slave preferred values and will speed up any
3029 * further connection establishment.
3031 * If connection parameters already exist, then they
3032 * will be kept and this function does nothing.
3034 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3036 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3037 sec_level, auth_type);
/* Connection attempt failed: map the error to a mgmt status */
3043 if (PTR_ERR(conn) == -EBUSY)
3044 status = MGMT_STATUS_BUSY;
3046 status = MGMT_STATUS_CONNECT_FAILED;
3048 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A callback already installed means another pairing is in progress */
3054 if (conn->connect_cfm_cb) {
3055 hci_conn_drop(conn);
3056 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3057 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3061 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3064 hci_conn_drop(conn);
3068 /* For LE, just connecting isn't a proof that the pairing finished */
3069 if (cp->addr.type == BDADDR_BREDR) {
3070 conn->connect_cfm_cb = pairing_complete_cb;
3071 conn->security_cfm_cb = pairing_complete_cb;
3072 conn->disconn_cfm_cb = pairing_complete_cb;
3074 conn->connect_cfm_cb = le_pairing_complete_cb;
3075 conn->security_cfm_cb = le_pairing_complete_cb;
3076 conn->disconn_cfm_cb = le_pairing_complete_cb;
3079 conn->io_capability = cp->io_cap;
3080 cmd->user_data = conn;
/* Already connected and secure enough: pairing is done immediately */
3082 if (conn->state == BT_CONNECTED &&
3083 hci_conn_security(conn, sec_level, auth_type))
3084 pairing_complete(cmd, 0);
3089 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, completing it with STATUS_CANCELLED.
 * NOTE(review): listing is elided (gaps in embedded numbering) — the
 * hci_dev_lock(), goto labels and err declaration are not visible here.
 */
3093 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3096 struct mgmt_addr_info *addr = data;
3097 struct pending_cmd *cmd;
3098 struct hci_conn *conn;
3105 if (!hdev_is_powered(hdev)) {
3106 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3107 MGMT_STATUS_NOT_POWERED);
/* No pending PAIR_DEVICE means there is nothing to cancel. */
3111 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3113 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3114 MGMT_STATUS_INVALID_PARAMS);
3118 conn = cmd->user_data;
/* The cancel request must target the same peer the pairing is for. */
3120 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3121 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3122 MGMT_STATUS_INVALID_PARAMS);
3126 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3128 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3129 addr, sizeof(*addr));
3131 hci_dev_unlock(hdev);
/* Common worker for the user confirm/passkey (neg-)reply mgmt commands.
 * Finds the live connection for @addr, routes LE replies through SMP and
 * BR/EDR replies through the given HCI opcode (@hci_op).
 * NOTE(review): elided listing — hci_dev_lock(), goto labels, "if (!conn)"
 * and the passkey-reply "else" branch structure are partly out of view.
 */
3135 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3136 struct mgmt_addr_info *addr, u16 mgmt_op,
3137 u16 hci_op, __le32 passkey)
3139 struct pending_cmd *cmd;
3140 struct hci_conn *conn;
3145 if (!hdev_is_powered(hdev)) {
3146 err = cmd_complete(sk, hdev->id, mgmt_op,
3147 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the connection on the transport matching the address type. */
3152 if (addr->type == BDADDR_BREDR)
3153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3155 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3158 err = cmd_complete(sk, hdev->id, mgmt_op,
3159 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies are handled entirely by the SMP layer. */
3164 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3165 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3167 err = cmd_complete(sk, hdev->id, mgmt_op,
3168 MGMT_STATUS_SUCCESS, addr,
3171 err = cmd_complete(sk, hdev->id, mgmt_op,
3172 MGMT_STATUS_FAILED, addr,
3178 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3184 /* Continue with pairing via HCI */
/* Passkey replies carry the passkey payload; all other replies send
 * just the bdaddr. */
3185 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3186 struct hci_cp_user_passkey_reply cp;
3188 bacpy(&cp.bdaddr, &addr->bdaddr);
3189 cp.passkey = passkey;
3190 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3192 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* Presumably only reached on hci_send_cmd() failure — TODO confirm
 * against the elided "if (err < 0)" line. */
3196 mgmt_pending_remove(cmd);
3199 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper that rejects a PIN request
 * via the common user_pairing_resp() helper (no passkey payload). */
3203 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3204 void *data, u16 len)
3206 struct mgmt_cp_pin_code_neg_reply *cp = data;
3210 return user_pairing_resp(sk, hdev, &cp->addr,
3211 MGMT_OP_PIN_CODE_NEG_REPLY,
3212 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: accept a user-confirmation request.
 * Validates the fixed command size, then delegates to user_pairing_resp().
 */
3215 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3218 struct mgmt_cp_user_confirm_reply *cp = data;
3222 if (len != sizeof(*cp))
3223 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3224 MGMT_STATUS_INVALID_PARAMS);
3226 return user_pairing_resp(sk, hdev, &cp->addr,
3227 MGMT_OP_USER_CONFIRM_REPLY,
3228 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user-confirmation request via
 * the common helper (no passkey payload). */
3231 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3232 void *data, u16 len)
3234 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3238 return user_pairing_resp(sk, hdev, &cp->addr,
3239 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3240 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forward the user-entered passkey to the
 * common helper, which sends it on via HCI or SMP as appropriate. */
3243 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3246 struct mgmt_cp_user_passkey_reply *cp = data;
3250 return user_pairing_resp(sk, hdev, &cp->addr,
3251 MGMT_OP_USER_PASSKEY_REPLY,
3252 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request via the
 * common helper (no passkey payload). */
3255 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3256 void *data, u16 len)
3258 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3262 return user_pairing_resp(sk, hdev, &cp->addr,
3263 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3264 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto the
 * given request. Runs later as part of hci_req_run(). */
3267 static void update_name(struct hci_request *req)
3269 struct hci_dev *hdev = req->hdev;
3270 struct hci_cp_write_local_name cp;
3272 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3274 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for SET_LOCAL_NAME: report the HCI
 * status back to the pending mgmt command's socket and drop the command.
 * NOTE(review): elided listing — the "if (!cmd)" guard and the branch
 * selecting cmd_status vs cmd_complete are not visible here.
 */
3277 static void set_name_complete(struct hci_dev *hdev, u8 status)
3279 struct mgmt_cp_set_local_name *cp;
3280 struct pending_cmd *cmd;
3282 BT_DBG("status 0x%02x", status);
3286 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3293 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3294 mgmt_status(status));
3296 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3299 mgmt_pending_remove(cmd);
3302 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update short/long device name. When the
 * controller is powered, the new name is pushed over HCI via update_name()
 * (and scan-response data for LE); when unpowered, only the cached copy
 * changes and LOCAL_NAME_CHANGED is emitted directly.
 * NOTE(review): elided listing — lock/unlock pairing, goto labels and the
 * lmp_bredr_capable() branch body are partly out of view.
 */
3305 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3308 struct mgmt_cp_set_local_name *cp = data;
3309 struct pending_cmd *cmd;
3310 struct hci_request req;
3317 /* If the old values are the same as the new ones just return a
3318 * direct command complete event.
3320 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3321 !memcmp(hdev->short_name, cp->short_name,
3322 sizeof(hdev->short_name))) {
3323 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name has no HCI counterpart, so it is always cached directly. */
3328 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3330 if (!hdev_is_powered(hdev)) {
3331 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3333 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3338 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3344 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3350 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3352 hci_req_init(&req, hdev);
3354 if (lmp_bredr_capable(hdev)) {
3359 /* The name is stored in the scan response data and so
3360 * no need to udpate the advertising data here.
3362 if (lmp_le_capable(hdev))
3363 update_scan_rsp_data(&req);
3365 err = hci_req_run(&req, set_name_complete);
3367 mgmt_pending_remove(cmd);
3370 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request local OOB pairing data
 * from the controller. Requires power and SSP support; rejects a second
 * request while one is pending. Uses the extended HCI variant when
 * Secure Connections is enabled.
 * NOTE(review): elided listing — lock/unlock pairing, goto labels and the
 * BUSY status constant line are not visible here.
 */
3374 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3375 void *data, u16 data_len)
3377 struct pending_cmd *cmd;
3380 BT_DBG("%s", hdev->name);
3384 if (!hdev_is_powered(hdev)) {
3385 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3386 MGMT_STATUS_NOT_POWERED);
3390 if (!lmp_ssp_capable(hdev)) {
3391 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3392 MGMT_STATUS_NOT_SUPPORTED);
3396 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3397 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3402 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* SC-enabled controllers return the P-256 (extended) OOB data set. */
3408 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3409 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3412 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3415 mgmt_pending_remove(cmd);
3418 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB hash/randomizer.
 * Two accepted payload sizes select the legacy (192-bit) or the extended
 * (192+256-bit) variant; any other length is INVALID_PARAMS.
 * NOTE(review): elided listing — lock, err declaration and parts of the
 * extended-variant argument list are out of view.
 */
3422 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3423 void *data, u16 len)
3427 BT_DBG("%s ", hdev->name);
3431 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3432 struct mgmt_cp_add_remote_oob_data *cp = data;
3435 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3436 cp->hash, cp->randomizer);
3438 status = MGMT_STATUS_FAILED;
3440 status = MGMT_STATUS_SUCCESS;
3442 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3443 status, &cp->addr, sizeof(cp->addr));
3444 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3445 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3448 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3454 status = MGMT_STATUS_FAILED;
3456 status = MGMT_STATUS_SUCCESS;
3458 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3459 status, &cp->addr, sizeof(cp->addr));
3461 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3462 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3463 MGMT_STATUS_INVALID_PARAMS);
3466 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop the stored OOB data for
 * the given address and report success or INVALID_PARAMS. */
3470 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3471 void *data, u16 len)
3473 struct mgmt_cp_remove_remote_oob_data *cp = data;
3477 BT_DBG("%s", hdev->name);
3481 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3483 status = MGMT_STATUS_INVALID_PARAMS;
3485 status = MGMT_STATUS_SUCCESS;
3487 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3488 status, &cp->addr, sizeof(cp->addr));
3490 hci_dev_unlock(hdev);
/* Fail the pending START_DISCOVERY command: reset discovery state to
 * STOPPED and complete the mgmt command with the mapped HCI status,
 * echoing back the discovery type. */
3494 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3496 struct pending_cmd *cmd;
3500 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3502 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3506 type = hdev->discovery.type;
3508 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3509 &type, sizeof(type));
3510 mgmt_pending_remove(cmd);
/* HCI request completion callback for START_DISCOVERY. On failure, fail
 * the pending command; otherwise enter DISCOVERY_FINDING and, for LE or
 * interleaved discovery, arm the delayed le_scan_disable work with the
 * type-specific timeout. BR/EDR inquiry needs no timer (it self-limits).
 * NOTE(review): elided listing — "if (status)", break statements and the
 * "if (!timeout) return" guard are out of view.
 */
3515 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3517 unsigned long timeout = 0;
3519 BT_DBG("status %d", status);
3523 mgmt_start_discovery_failed(hdev, status);
3524 hci_dev_unlock(hdev);
3529 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3530 hci_dev_unlock(hdev);
3532 switch (hdev->discovery.type) {
3533 case DISCOV_TYPE_LE:
3534 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3537 case DISCOV_TYPE_INTERLEAVED:
3538 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3541 case DISCOV_TYPE_BREDR:
3545 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3551 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler: validate state (powered, no periodic
 * inquiry, discovery currently stopped), then build and run an HCI request
 * for the requested discovery type — a BR/EDR inquiry, or an active LE
 * scan (also used for the LE half of interleaved discovery).
 * NOTE(review): elided listing — lock/unlock pairing, goto labels, break
 * statements and several status constants are out of view.
 */
3554 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3555 void *data, u16 len)
3557 struct mgmt_cp_start_discovery *cp = data;
3558 struct pending_cmd *cmd;
3559 struct hci_cp_le_set_scan_param param_cp;
3560 struct hci_cp_le_set_scan_enable enable_cp;
3561 struct hci_cp_inquiry inq_cp;
3562 struct hci_request req;
3563 /* General inquiry access code (GIAC) */
3564 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3565 u8 status, own_addr_type;
3568 BT_DBG("%s", hdev->name);
3572 if (!hdev_is_powered(hdev)) {
3573 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3574 MGMT_STATUS_NOT_POWERED);
3578 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3579 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3584 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3585 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3590 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3596 hdev->discovery.type = cp->type;
3598 hci_req_init(&req, hdev);
3600 switch (hdev->discovery.type) {
3601 case DISCOV_TYPE_BREDR:
3602 status = mgmt_bredr_support(hdev);
3604 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3606 mgmt_pending_remove(cmd);
/* A controller-level inquiry already in progress blocks discovery. */
3610 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3611 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3613 mgmt_pending_remove(cmd);
3617 hci_inquiry_cache_flush(hdev);
3619 memset(&inq_cp, 0, sizeof(inq_cp));
3620 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3621 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3622 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3625 case DISCOV_TYPE_LE:
3626 case DISCOV_TYPE_INTERLEAVED:
3627 status = mgmt_le_support(hdev);
3629 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3631 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR enabled. */
3635 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3636 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3637 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3638 MGMT_STATUS_NOT_SUPPORTED);
3639 mgmt_pending_remove(cmd);
/* Active scanning while advertising is rejected. */
3643 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3644 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3645 MGMT_STATUS_REJECTED);
3646 mgmt_pending_remove(cmd);
3650 /* If controller is scanning, it means the background scanning
3651 * is running. Thus, we should temporarily stop it in order to
3652 * set the discovery scanning parameters.
3654 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3655 hci_req_add_le_scan_disable(&req);
/* NOTE(review): "¶m_cp" below is mojibake for "&param_cp"
 * ("&param" was mangled into the pilcrow entity) — restore when
 * this listing is reconciled with the real source. */
3657 memset(¶m_cp, 0, sizeof(param_cp));
3659 /* All active scans will be done with either a resolvable
3660 * private address (when privacy feature has been enabled)
3661 * or unresolvable private address.
3663 err = hci_update_random_address(&req, true, &own_addr_type);
3665 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3666 MGMT_STATUS_FAILED);
3667 mgmt_pending_remove(cmd);
3671 param_cp.type = LE_SCAN_ACTIVE;
3672 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3673 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3674 param_cp.own_address_type = own_addr_type;
3675 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3678 memset(&enable_cp, 0, sizeof(enable_cp));
3679 enable_cp.enable = LE_SCAN_ENABLE;
3680 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3681 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Default case: unknown discovery type. */
3686 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3687 MGMT_STATUS_INVALID_PARAMS);
3688 mgmt_pending_remove(cmd);
3692 err = hci_req_run(&req, start_discovery_complete);
3694 mgmt_pending_remove(cmd);
3696 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3699 hci_dev_unlock(hdev);
/* Fail the pending STOP_DISCOVERY command with the mapped HCI status,
 * echoing back the current discovery type. */
3703 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3705 struct pending_cmd *cmd;
3708 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3712 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3713 &hdev->discovery.type, sizeof(hdev->discovery.type));
3714 mgmt_pending_remove(cmd);
/* HCI request completion callback for STOP_DISCOVERY: on error fail the
 * pending command, otherwise mark discovery as STOPPED.
 * NOTE(review): the "if (status)"/"else" structure is elided from view. */
3719 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3721 BT_DBG("status %d", status);
3726 mgmt_stop_discovery_failed(hdev, status);
3730 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3733 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: verify discovery is active and the
 * requested type matches, then queue hci_stop_discovery() as an HCI
 * request. -ENODATA from hci_req_run() means no HCI traffic was needed,
 * so complete immediately and mark discovery stopped.
 * NOTE(review): elided listing — lock, goto labels and the success path
 * after hci_req_run() are partly out of view.
 */
3736 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3739 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3740 struct pending_cmd *cmd;
3741 struct hci_request req;
3744 BT_DBG("%s", hdev->name);
3748 if (!hci_discovery_active(hdev)) {
3749 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3750 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3751 sizeof(mgmt_cp->type));
3755 if (hdev->discovery.type != mgmt_cp->type) {
3756 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3757 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3758 sizeof(mgmt_cp->type));
3762 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3768 hci_req_init(&req, hdev);
3770 hci_stop_discovery(&req);
3772 err = hci_req_run(&req, stop_discovery_complete);
3774 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3778 mgmt_pending_remove(cmd);
3780 /* If no HCI commands were sent we're done */
3781 if (err == -ENODATA) {
3782 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3783 &mgmt_cp->type, sizeof(mgmt_cp->type));
3784 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3788 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether the name of
 * a discovered device is already known. Unknown names are flagged
 * NAME_NEEDED so name resolution can be scheduled.
 * NOTE(review): elided listing — lock, "if (!e)" guard and goto labels
 * are out of view.
 */
3792 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3795 struct mgmt_cp_confirm_name *cp = data;
3796 struct inquiry_entry *e;
3799 BT_DBG("%s", hdev->name);
3803 if (!hci_discovery_active(hdev)) {
3804 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3805 MGMT_STATUS_FAILED, &cp->addr,
3810 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3812 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3813 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3818 if (cp->name_known) {
3819 e->name_state = NAME_KNOWN;
3822 e->name_state = NAME_NEEDED;
3823 hci_inquiry_cache_update_resolve(hdev, e);
3826 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3830 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and,
 * on success, broadcast DEVICE_BLOCKED to other mgmt sockets. */
3834 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3837 struct mgmt_cp_block_device *cp = data;
3841 BT_DBG("%s", hdev->name);
3843 if (!bdaddr_type_is_valid(cp->addr.type))
3844 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3845 MGMT_STATUS_INVALID_PARAMS,
3846 &cp->addr, sizeof(cp->addr));
3850 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3852 status = MGMT_STATUS_FAILED;
/* Skip the blocking socket itself when broadcasting the event (the
 * skip-sk argument is on an elided line). */
3856 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3858 status = MGMT_STATUS_SUCCESS;
3861 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3862 &cp->addr, sizeof(cp->addr));
3864 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device() — remove the
 * address from the blacklist and broadcast DEVICE_UNBLOCKED on success. */
3869 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3872 struct mgmt_cp_unblock_device *cp = data;
3876 BT_DBG("%s", hdev->name);
3878 if (!bdaddr_type_is_valid(cp->addr.type))
3879 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3880 MGMT_STATUS_INVALID_PARAMS,
3881 &cp->addr, sizeof(cp->addr));
3885 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
/* Delete failure means the entry did not exist → INVALID_PARAMS. */
3887 status = MGMT_STATUS_INVALID_PARAMS;
3891 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3893 status = MGMT_STATUS_SUCCESS;
3896 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3897 &cp->addr, sizeof(cp->addr));
3899 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID (source, vendor,
 * product, version) and refresh the EIR/class via an HCI request whose
 * body (update_eir/update_class) sits on elided lines.
 * NOTE(review): "source" and "err" declarations are elided from view.
 */
3904 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3907 struct mgmt_cp_set_device_id *cp = data;
3908 struct hci_request req;
3912 BT_DBG("%s", hdev->name);
3914 source = __le16_to_cpu(cp->source);
/* Valid DI source values are 0x0000-0x0002 (none/SIG/USB-IF). */
3916 if (source > 0x0002)
3917 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3918 MGMT_STATUS_INVALID_PARAMS);
3922 hdev->devid_source = source;
3923 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3924 hdev->devid_product = __le16_to_cpu(cp->product);
3925 hdev->devid_version = __le16_to_cpu(cp->version);
3927 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3929 hci_req_init(&req, hdev);
3931 hci_req_run(&req, NULL);
3933 hci_dev_unlock(hdev);
/* HCI request completion callback for SET_ADVERTISING: on failure,
 * answer every pending SET_ADVERTISING command with the mapped error;
 * on success, send settings responses and a NEW_SETTINGS event. */
3938 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3940 struct cmd_lookup match = { NULL, hdev };
3943 u8 mgmt_err = mgmt_status(status);
3945 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3946 cmd_status_rsp, &mgmt_err);
3950 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3953 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising. When powered
 * off, the value is unchanged, or LE connections exist, only the flag is
 * toggled and a direct response is sent; otherwise an HCI request to
 * enable/disable advertising is issued.
 * NOTE(review): elided listing — "val = !!cp->val", lock/unlock and goto
 * labels are out of view.
 */
3959 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3962 struct mgmt_mode *cp = data;
3963 struct pending_cmd *cmd;
3964 struct hci_request req;
3965 u8 val, enabled, status;
3968 BT_DBG("request for %s", hdev->name);
3970 status = mgmt_le_support(hdev);
3972 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3975 if (cp->val != 0x00 && cp->val != 0x01)
3976 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3977 MGMT_STATUS_INVALID_PARAMS);
3982 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3984 /* The following conditions are ones which mean that we should
3985 * not do any HCI communication but directly send a mgmt
3986 * response to user space (after toggling the flag if
3989 if (!hdev_is_powered(hdev) || val == enabled ||
3990 hci_conn_num(hdev, LE_LINK) > 0) {
3991 bool changed = false;
3993 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3994 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3998 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4003 err = new_settings(hdev, sk);
/* Serialize with other advertising/LE changes in flight. */
4008 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4009 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4010 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4015 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4021 hci_req_init(&req, hdev);
4024 enable_advertising(&req);
4026 disable_advertising(&req);
4028 err = hci_req_run(&req, set_advertising_complete);
4030 mgmt_pending_remove(cmd);
4033 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: set the LE static random address.
 * Only allowed while powered off; BDADDR_ANY clears it, otherwise the
 * address must not be BDADDR_NONE and must have the two most significant
 * bits set as required for static random addresses. */
4037 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4038 void *data, u16 len)
4040 struct mgmt_cp_set_static_address *cp = data;
4043 BT_DBG("%s", hdev->name);
4045 if (!lmp_le_capable(hdev))
4046 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4047 MGMT_STATUS_NOT_SUPPORTED);
4049 if (hdev_is_powered(hdev))
4050 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4051 MGMT_STATUS_REJECTED);
4053 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4054 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4055 return cmd_status(sk, hdev->id,
4056 MGMT_OP_SET_STATIC_ADDRESS,
4057 MGMT_STATUS_INVALID_PARAMS);
4059 /* Two most significant bits shall be set */
4060 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4061 return cmd_status(sk, hdev->id,
4062 MGMT_OP_SET_STATIC_ADDRESS,
4063 MGMT_STATUS_INVALID_PARAMS);
4068 bacpy(&hdev->static_addr, &cp->bdaddr);
4070 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4072 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval/window. Both
 * must be within the spec range 0x0004-0x4000 and window <= interval.
 * If a background (passive) scan is running, restart it so the new
 * parameters take effect immediately. */
4077 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4078 void *data, u16 len)
4080 struct mgmt_cp_set_scan_params *cp = data;
4081 __u16 interval, window;
4084 BT_DBG("%s", hdev->name);
4086 if (!lmp_le_capable(hdev))
4087 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4088 MGMT_STATUS_NOT_SUPPORTED);
4090 interval = __le16_to_cpu(cp->interval);
4092 if (interval < 0x0004 || interval > 0x4000)
4093 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4094 MGMT_STATUS_INVALID_PARAMS);
4096 window = __le16_to_cpu(cp->window);
4098 if (window < 0x0004 || window > 0x4000)
4099 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4100 MGMT_STATUS_INVALID_PARAMS);
4102 if (window > interval)
4103 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4104 MGMT_STATUS_INVALID_PARAMS);
4108 hdev->le_scan_interval = interval;
4109 hdev->le_scan_window = window;
4111 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4113 /* If background scan is running, restart it so new parameters are
4116 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4117 hdev->discovery.state == DISCOVERY_STOPPED) {
4118 struct hci_request req;
4120 hci_req_init(&req, hdev);
4122 hci_req_add_le_scan_disable(&req);
4123 hci_req_add_le_passive_scan(&req);
4125 hci_req_run(&req, NULL);
4128 hci_dev_unlock(hdev);
/* HCI request completion callback for SET_FAST_CONNECTABLE: on success
 * commit the HCI_FAST_CONNECTABLE flag from the pending command's
 * parameter and notify; on failure send the mapped error status.
 * NOTE(review): the "if (!cmd)" guard and if/else structure are elided. */
4133 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4135 struct pending_cmd *cmd;
4137 BT_DBG("status 0x%02x", status);
4141 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4146 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4147 mgmt_status(status));
4149 struct mgmt_mode *cp = cmd->param;
4152 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4154 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4156 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4157 new_settings(hdev, cmd->sk);
4160 mgmt_pending_remove(cmd);
4163 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled, controller >= BT 1.2,
 * powered on and connectable; a no-op request gets a direct settings
 * response.
 * NOTE(review): elided listing — lock, BUSY status constant and goto
 * labels are out of view.
 */
4166 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4167 void *data, u16 len)
4169 struct mgmt_mode *cp = data;
4170 struct pending_cmd *cmd;
4171 struct hci_request req;
4174 BT_DBG("%s", hdev->name);
4176 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4177 hdev->hci_ver < BLUETOOTH_VER_1_2)
4178 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4179 MGMT_STATUS_NOT_SUPPORTED);
4181 if (cp->val != 0x00 && cp->val != 0x01)
4182 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4183 MGMT_STATUS_INVALID_PARAMS);
4185 if (!hdev_is_powered(hdev))
4186 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4187 MGMT_STATUS_NOT_POWERED);
4189 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4190 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4191 MGMT_STATUS_REJECTED);
4195 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4196 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested value already active → reply without HCI traffic. */
4201 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4202 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4207 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4214 hci_req_init(&req, hdev);
4216 write_fast_connectable(&req, cp->val);
4218 err = hci_req_run(&req, fast_connectable_complete);
4220 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4221 MGMT_STATUS_FAILED);
4222 mgmt_pending_remove(cmd);
4226 hci_dev_unlock(hdev);
/* Queue HCI Write Scan Enable reflecting the CONNECTABLE/DISCOVERABLE
 * flags (SCAN_PAGE and SCAN_INQUIRY bits), first making sure fast
 * connectable is off.
 * NOTE(review): the "u8 scan = 0" declaration and the SCAN_PAGE
 * assignment line are elided from view. */
4231 static void set_bredr_scan(struct hci_request *req)
4233 struct hci_dev *hdev = req->hdev;
4236 /* Ensure that fast connectable is disabled. This function will
4237 * not do anything if the page scan parameters are already what
4240 write_fast_connectable(req, false);
4242 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4244 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4245 scan |= SCAN_INQUIRY;
4248 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for SET_BREDR: on failure roll back
 * the optimistically-set HCI_BREDR_ENABLED flag and report the error;
 * on success send the settings response and NEW_SETTINGS. */
4251 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4253 struct pending_cmd *cmd;
4255 BT_DBG("status 0x%02x", status);
4259 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4264 u8 mgmt_err = mgmt_status(status);
4266 /* We need to restore the flag if related HCI commands
4269 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4271 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4273 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4274 new_settings(hdev, cmd->sk);
4277 mgmt_pending_remove(cmd);
4280 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Disabling is only possible while powered off (it also
 * clears the BR/EDR-dependent flags); enabling while powered sets the
 * flag up front and pushes scan-enable plus refreshed advertising data
 * over HCI.
 * NOTE(review): elided listing — lock, "if (!cp->val)" structure, BUSY
 * status constant and goto labels are out of view.
 */
4283 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4285 struct mgmt_mode *cp = data;
4286 struct pending_cmd *cmd;
4287 struct hci_request req;
4290 BT_DBG("request for %s", hdev->name);
4292 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4293 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4294 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR can only be toggled while LE stays enabled. */
4296 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4297 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4298 MGMT_STATUS_REJECTED);
4300 if (cp->val != 0x00 && cp->val != 0x01)
4301 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4302 MGMT_STATUS_INVALID_PARAMS);
4306 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4307 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4311 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears every setting that depends on it. */
4313 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4314 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4315 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4316 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4317 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4320 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4322 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4326 err = new_settings(hdev, sk);
4330 /* Reject disabling when powered on */
4332 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4333 MGMT_STATUS_REJECTED);
4337 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4338 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4343 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4349 /* We need to flip the bit already here so that update_adv_data
4350 * generates the correct flags.
4352 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4354 hci_req_init(&req, hdev);
4356 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4357 set_bredr_scan(&req);
4359 /* Since only the advertising data flags will change, there
4360 * is no need to update the scan response data.
4362 update_adv_data(&req);
4364 err = hci_req_run(&req, set_bredr_complete);
4366 mgmt_pending_remove(cmd);
4369 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support.
 * val 0x00 = off, 0x01 = on, 0x02 = SC-only mode. When powered off only
 * the flags change; when powered, HCI Write SC Support is sent and the
 * SC_ONLY flag is updated on success.
 * NOTE(review): elided listing — lock, "val = !!cp->val", BUSY status
 * constant and goto labels are out of view.
 */
4373 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4374 void *data, u16 len)
4376 struct mgmt_mode *cp = data;
4377 struct pending_cmd *cmd;
4381 BT_DBG("request for %s", hdev->name);
4383 status = mgmt_bredr_support(hdev);
4385 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* HCI_FORCE_SC debugfs override allows testing without LMP SC support. */
4388 if (!lmp_sc_capable(hdev) &&
4389 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4390 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4391 MGMT_STATUS_NOT_SUPPORTED);
4393 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4394 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4395 MGMT_STATUS_INVALID_PARAMS);
4399 if (!hdev_is_powered(hdev)) {
4403 changed = !test_and_set_bit(HCI_SC_ENABLED,
4405 if (cp->val == 0x02)
4406 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4408 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4410 changed = test_and_clear_bit(HCI_SC_ENABLED,
4412 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4415 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4420 err = new_settings(hdev, sk);
4425 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4426 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Nothing to change → direct settings response. */
4433 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4434 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4435 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4439 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4445 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4447 mgmt_pending_remove(cmd);
4451 if (cp->val == 0x02)
4452 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4454 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4457 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: val 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = additionally enable SSP debug mode on the
 * controller (sent over HCI when powered and SSP-enabled). */
4461 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4462 void *data, u16 len)
4464 struct mgmt_mode *cp = data;
4465 bool changed, use_changed;
4468 BT_DBG("request for %s", hdev->name);
4470 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4471 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4472 MGMT_STATUS_INVALID_PARAMS);
4477 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4480 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4483 if (cp->val == 0x02)
4484 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4487 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4490 if (hdev_is_powered(hdev) && use_changed &&
4491 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4492 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4493 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4494 sizeof(mode), &mode);
4497 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4502 err = new_settings(hdev, sk);
4505 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage).
 * Only allowed while powered off. Enabling stores the IRK and marks the
 * RPA expired so a fresh one is generated; disabling wipes the IRK. */
4509 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4512 struct mgmt_cp_set_privacy *cp = cp_data;
4516 BT_DBG("request for %s", hdev->name);
4518 if (!lmp_le_capable(hdev))
4519 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4520 MGMT_STATUS_NOT_SUPPORTED);
4522 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4523 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4524 MGMT_STATUS_INVALID_PARAMS);
4526 if (hdev_is_powered(hdev))
4527 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4528 MGMT_STATUS_REJECTED);
4532 /* If user space supports this command it is also expected to
4533 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4535 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4538 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4539 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next use. */
4540 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4542 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4543 memset(hdev->irk, 0, sizeof(hdev->irk));
4544 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4547 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4552 err = new_settings(hdev, sk);
4555 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are accepted
 * as-is; static random addresses must have the two MSBs set.
 * NOTE(review): the return statements and default case are elided. */
4559 static bool irk_is_valid(struct mgmt_irk_info *irk)
4561 switch (irk->addr.type) {
4562 case BDADDR_LE_PUBLIC:
4565 case BDADDR_LE_RANDOM:
4566 /* Two most significant bits shall be set */
4567 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: bulk-load Identity Resolving Keys. Bounds
 * irk_count against the largest value that fits in a u16-sized packet
 * (overflow guard), checks the exact expected length, validates every
 * entry, then replaces the existing IRK store atomically under the
 * device lock and enables RPA resolving. */
4575 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4578 struct mgmt_cp_load_irks *cp = cp_data;
4579 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4580 sizeof(struct mgmt_irk_info));
4581 u16 irk_count, expected_len;
4584 BT_DBG("request for %s", hdev->name);
4586 if (!lmp_le_capable(hdev))
4587 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4588 MGMT_STATUS_NOT_SUPPORTED);
4590 irk_count = __le16_to_cpu(cp->irk_count);
4591 if (irk_count > max_irk_count) {
4592 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4593 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4594 MGMT_STATUS_INVALID_PARAMS);
4597 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4598 if (expected_len != len) {
4599 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4601 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4602 MGMT_STATUS_INVALID_PARAMS);
4605 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries before touching the existing key store. */
4607 for (i = 0; i < irk_count; i++) {
4608 struct mgmt_irk_info *key = &cp->irks[i];
4610 if (!irk_is_valid(key))
4611 return cmd_status(sk, hdev->id,
4613 MGMT_STATUS_INVALID_PARAMS);
4618 hci_smp_irks_clear(hdev);
4620 for (i = 0; i < irk_count; i++) {
4621 struct mgmt_irk_info *irk = &cp->irks[i];
4624 if (irk->addr.type == BDADDR_LE_PUBLIC)
4625 addr_type = ADDR_LE_DEV_PUBLIC;
4627 addr_type = ADDR_LE_DEV_RANDOM;
4629 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4633 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4635 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4637 hci_dev_unlock(hdev);
/* Validate a single mgmt_ltk_info entry from MGMT_OP_LOAD_LONG_TERM_KEYS.
 * The master field must be a strict boolean; random LE addresses must be
 * static random (top two bits set).
 * NOTE(review): braces and return statements are elided in this excerpt.
 */
4642 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4644 if (key->master != 0x00 && key->master != 0x01)
4647 switch (key->addr.type) {
4648 case BDADDR_LE_PUBLIC:
4651 case BDADDR_LE_RANDOM:
4652 /* Two most significant bits shall be set */
4653 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored SMP Long
 * Term Keys with the list supplied by user space. Mirrors load_irks():
 * bounds check, exact-length check, validate-all-then-apply.
 * NOTE(review): elided here — braces, hci_dev_lock(), locals (i, err),
 * the master/slave type selection, switch default branch, key->rand
 * argument, and the final return.
 */
4661 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4662 void *cp_data, u16 len)
4664 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Cap key_count so the expected_len computation cannot overflow u16. */
4665 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4666 sizeof(struct mgmt_ltk_info));
4667 u16 key_count, expected_len;
4670 BT_DBG("request for %s", hdev->name);
4672 if (!lmp_le_capable(hdev))
4673 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4674 MGMT_STATUS_NOT_SUPPORTED);
4676 key_count = __le16_to_cpu(cp->key_count);
4677 if (key_count > max_key_count) {
4678 BT_ERR("load_ltks: too big key_count value %u", key_count);
4679 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4680 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the payload length byte-for-byte. */
4683 expected_len = sizeof(*cp) + key_count *
4684 sizeof(struct mgmt_ltk_info);
4685 if (expected_len != len) {
4686 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4688 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4689 MGMT_STATUS_INVALID_PARAMS);
4692 BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: reject the whole command if any entry is malformed. */
4694 for (i = 0; i < key_count; i++) {
4695 struct mgmt_ltk_info *key = &cp->keys[i];
4697 if (!ltk_is_valid(key))
4698 return cmd_status(sk, hdev->id,
4699 MGMT_OP_LOAD_LONG_TERM_KEYS,
4700 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: drop the old LTK list and store the new entries. */
4705 hci_smp_ltks_clear(hdev);
4707 for (i = 0; i < key_count; i++) {
4708 struct mgmt_ltk_info *key = &cp->keys[i];
4709 u8 type, addr_type, authenticated;
4711 if (key->addr.type == BDADDR_LE_PUBLIC)
4712 addr_type = ADDR_LE_DEV_PUBLIC;
4714 addr_type = ADDR_LE_DEV_RANDOM;
/* Role selection elided; the visible branch stores a slave key. */
4719 type = SMP_LTK_SLAVE;
/* Map mgmt key type to the SMP authenticated flag. */
4721 switch (key->type) {
4722 case MGMT_LTK_UNAUTHENTICATED:
4723 authenticated = 0x00;
4725 case MGMT_LTK_AUTHENTICATED:
4726 authenticated = 0x01;
4732 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4733 authenticated, key->val, key->enc_size, key->ediv,
4737 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4740 hci_dev_unlock(hdev);
/* Match context passed through mgmt_pending_foreach() when completing
 * MGMT_OP_GET_CONN_INFO requests for a specific connection.
 * NOTE(review): a mgmt_status member is referenced by the completion
 * handlers below but its declaration is elided from this excerpt.
 */
4745 struct cmd_conn_lookup {
4746 struct hci_conn *conn;
4747 bool valid_tx_power;
/* Per-pending-command callback: reply to a Get Connection Information
 * request once the RSSI/TX-power cache refresh has finished.
 * Only commands whose user_data matches the refreshed connection are
 * answered; others are left pending.
 * NOTE(review): braces and an early return for the non-matching case are
 * elided in this excerpt.
 */
4751 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4753 struct cmd_conn_lookup *match = data;
4754 struct mgmt_cp_get_conn_info *cp;
4755 struct mgmt_rp_get_conn_info rp;
4756 struct hci_conn *conn = cmd->user_data;
/* Skip pending commands that target a different connection. */
4758 if (conn != match->conn)
4761 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
/* Echo the address from the original request back in the reply. */
4763 memset(&rp, 0, sizeof(rp));
4764 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4765 rp.addr.type = cp->addr.type;
4767 if (!match->mgmt_status) {
4768 rp.rssi = conn->rssi;
/* TX power values are only trustworthy if the refresh request that
 * read them completed successfully. */
4770 if (match->valid_tx_power) {
4771 rp.tx_power = conn->tx_power;
4772 rp.max_tx_power = conn->max_tx_power;
4774 rp.tx_power = HCI_TX_POWER_INVALID;
4775 rp.max_tx_power = HCI_TX_POWER_INVALID;
4779 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4780 match->mgmt_status, &rp, sizeof(rp));
/* Release the reference taken when the command was queued. */
4782 hci_conn_drop(conn);
4784 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI/TX-power cache refresh
 * issued by get_conn_info(). Recovers the connection handle from the
 * last-sent HCI command and answers all matching pending mgmt commands.
 * NOTE(review): braces, hci_dev_lock(), the local handle declaration, and
 * error-path gotos/unlocks are elided in this excerpt.
 */
4787 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4789 struct hci_cp_read_rssi *cp;
4790 struct hci_conn *conn;
4791 struct cmd_conn_lookup match;
4794 BT_DBG("status 0x%02x", status);
4798 /* TX power data is valid in case request completed successfully,
4799 * otherwise we assume it's not valid. At the moment we assume that
4800 * either both or none of current and max values are valid to keep code
4803 match.valid_tx_power = !status;
4805 /* Commands sent in request are either Read RSSI or Read Transmit Power
4806 * Level so we check which one was last sent to retrieve connection
4807 * handle. Both commands have handle as first parameter so it's safe to
4808 * cast data on the same command struct.
4810 * First command sent is always Read RSSI and we fail only if it fails.
4811 * In other case we simply override error to indicate success as we
4812 * already remembered if TX power value is actually valid.
4814 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
/* Fallback lookup when the last command was Read Transmit Power Level
 * (guard condition elided). */
4816 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4821 BT_ERR("invalid sent_cmd in response");
4825 handle = __le16_to_cpu(cp->handle);
4826 conn = hci_conn_hash_lookup_handle(hdev, handle);
4828 BT_ERR("unknown handle (%d) in response", handle);
4833 match.mgmt_status = mgmt_status(status);
4835 /* Cache refresh is complete, now reply for mgmt request for given
4838 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4839 get_conn_info_complete, &match);
4842 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for a
 * connected device. Serves values from the per-connection cache when
 * fresh; otherwise issues an HCI request to refresh them and defers the
 * reply to conn_info_refresh_complete()/get_conn_info_complete().
 * NOTE(review): braces, hci_dev_lock(), err declaration, gotos and the
 * final return are elided in this excerpt.
 */
4845 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4848 struct mgmt_cp_get_conn_info *cp = data;
4849 struct mgmt_rp_get_conn_info rp;
4850 struct hci_conn *conn;
4851 unsigned long conn_info_age;
4854 BT_DBG("%s", hdev->name);
/* Pre-fill the reply with the requested address for all exit paths. */
4856 memset(&rp, 0, sizeof(rp));
4857 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4858 rp.addr.type = cp->addr.type;
4860 if (!bdaddr_type_is_valid(cp->addr.type))
4861 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4862 MGMT_STATUS_INVALID_PARAMS,
4867 if (!hdev_is_powered(hdev)) {
4868 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4869 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look the connection up on the right transport (ACL vs LE). */
4873 if (cp->addr.type == BDADDR_BREDR)
4874 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4879 if (!conn || conn->state != BT_CONNECTED) {
4880 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4881 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4885 /* To avoid client trying to guess when to poll again for information we
4886 * calculate conn info age as random value between min/max set in hdev.
4888 conn_info_age = hdev->conn_info_min_age +
4889 prandom_u32_max(hdev->conn_info_max_age -
4890 hdev->conn_info_min_age);
4892 /* Query controller to refresh cached values if they are too old or were
4895 if (time_after(jiffies, conn->conn_info_timestamp +
4896 msecs_to_jiffies(conn_info_age)) ||
4897 !conn->conn_info_timestamp) {
4898 struct hci_request req;
4899 struct hci_cp_read_tx_power req_txp_cp;
4900 struct hci_cp_read_rssi req_rssi_cp;
4901 struct pending_cmd *cmd;
/* Read RSSI is always the first command in the refresh request. */
4903 hci_req_init(&req, hdev);
4904 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4905 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4908 /* For LE links TX power does not change thus we don't need to
4909 * query for it once value is known.
4911 if (!bdaddr_type_is_le(cp->addr.type) ||
4912 conn->tx_power == HCI_TX_POWER_INVALID) {
4913 req_txp_cp.handle = cpu_to_le16(conn->handle);
4914 req_txp_cp.type = 0x00;
4915 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4916 sizeof(req_txp_cp), &req_txp_cp);
4919 /* Max TX power needs to be read only once per connection */
4920 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4921 req_txp_cp.handle = cpu_to_le16(conn->handle);
4922 req_txp_cp.type = 0x01;
4923 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4924 sizeof(req_txp_cp), &req_txp_cp);
4927 err = hci_req_run(&req, conn_info_refresh_complete);
/* Queue the mgmt command; the reply is sent from the completion
 * callback once the refresh finishes. */
4931 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a reference for the async path; dropped in the callback. */
4938 hci_conn_hold(conn);
4939 cmd->user_data = conn;
4941 conn->conn_info_timestamp = jiffies;
4943 /* Cache is valid, just reply with values cached in hci_conn */
4944 rp.rssi = conn->rssi;
4945 rp.tx_power = conn->tx_power;
4946 rp.max_tx_power = conn->max_tx_power;
4948 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4949 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4953 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_GET_CLOCK_INFO: build and
 * send the reply containing local (and optionally piconet) clock values.
 * NOTE(review): elided in this excerpt — braces, hci_dev_lock(), the
 * assignment of cp (presumably cp = cmd->param), the conn = NULL path for
 * local-clock-only requests, and the NULL checks after each lookup.
 */
4957 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
4959 struct mgmt_cp_get_clock_info *cp;
4960 struct mgmt_rp_get_clock_info rp;
4961 struct hci_cp_read_clock *hci_cp;
4962 struct pending_cmd *cmd;
4963 struct hci_conn *conn;
4965 BT_DBG("%s status %u", hdev->name, status);
4969 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection was read;
 * recover that connection from the handle in the sent command. */
4973 if (hci_cp->which) {
4974 u16 handle = __le16_to_cpu(hci_cp->handle);
4975 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* Find the pending mgmt command keyed by the same connection. */
4980 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
4986 memset(&rp, 0, sizeof(rp));
4987 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
4992 rp.local_clock = cpu_to_le32(hdev->clock);
/* Piconet clock/accuracy only filled when a connection was involved
 * (guard condition elided). */
4995 rp.piconet_clock = cpu_to_le32(conn->clock);
4996 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5000 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5002 mgmt_pending_remove(cmd);
/* Drop the reference taken in get_clock_info() (guard elided). */
5004 hci_conn_drop(conn);
5007 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local BR/EDR clock and,
 * when a peer address is given, the piconet clock of that connection.
 * Defers the reply to get_clock_info_complete().
 * NOTE(review): braces, hci_dev_lock(), err declaration, the conn = NULL
 * branch for BDADDR_ANY, gotos and the final return are elided here.
 */
5010 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5013 struct mgmt_cp_get_clock_info *cp = data;
5014 struct mgmt_rp_get_clock_info rp;
5015 struct hci_cp_read_clock hci_cp;
5016 struct pending_cmd *cmd;
5017 struct hci_request req;
5018 struct hci_conn *conn;
5021 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address for the error exit paths. */
5023 memset(&rp, 0, sizeof(rp));
5024 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5025 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5027 if (cp->addr.type != BDADDR_BREDR)
5028 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5029 MGMT_STATUS_INVALID_PARAMS,
5034 if (!hdev_is_powered(hdev)) {
5035 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5036 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-BDADDR_ANY address: the peer must be currently connected. */
5040 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5041 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5043 if (!conn || conn->state != BT_CONNECTED) {
5044 err = cmd_complete(sk, hdev->id,
5045 MGMT_OP_GET_CLOCK_INFO,
5046 MGMT_STATUS_NOT_CONNECTED,
5054 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5060 hci_req_init(&req, hdev);
/* which = 0x00 (zeroed struct) reads the local clock. */
5062 memset(&hci_cp, 0, sizeof(hci_cp));
5063 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* With a connection, additionally read its piconet clock; hold a
 * reference that get_clock_info_complete() drops. */
5066 hci_conn_hold(conn);
5067 cmd->user_data = conn;
5069 hci_cp.handle = cpu_to_le16(conn->handle);
5070 hci_cp.which = 0x01; /* Piconet clock */
5071 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5074 err = hci_req_run(&req, get_clock_info_complete);
5076 mgmt_pending_remove(cmd);
5079 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_ADDED event to all mgmt sockets except sk.
 * NOTE(review): the assignment of ev.action appears to be elided from
 * this excerpt, along with braces.
 */
5083 static void device_added(struct sock *sk, struct hci_dev *hdev,
5084 bdaddr_t *bdaddr, u8 type, u8 action)
5086 struct mgmt_ev_device_added ev;
5088 bacpy(&ev.addr.bdaddr, bdaddr);
5089 ev.addr.type = type;
5092 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: whitelist an LE device for background
 * auto-connection (action 0x01) or connection reporting (presumably
 * action 0x00 — the selecting condition is elided).
 * NOTE(review): braces, hci_dev_lock(), err declaration, gotos and the
 * final return are elided in this excerpt.
 */
5095 static int add_device(struct sock *sk, struct hci_dev *hdev,
5096 void *data, u16 len)
5098 struct mgmt_cp_add_device *cp = data;
5099 u8 auto_conn, addr_type;
5102 BT_DBG("%s", hdev->name);
/* Only concrete LE addresses are accepted; BDADDR_ANY is rejected. */
5104 if (!bdaddr_type_is_le(cp->addr.type) ||
5105 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5106 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5107 MGMT_STATUS_INVALID_PARAMS,
5108 &cp->addr, sizeof(cp->addr));
5110 if (cp->action != 0x00 && cp->action != 0x01)
5111 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5112 MGMT_STATUS_INVALID_PARAMS,
5113 &cp->addr, sizeof(cp->addr));
5117 if (cp->addr.type == BDADDR_LE_PUBLIC)
5118 addr_type = ADDR_LE_DEV_PUBLIC;
5120 addr_type = ADDR_LE_DEV_RANDOM;
/* Map the mgmt action onto the kernel auto-connect policy (the
 * condition choosing between these branches is elided). */
5123 auto_conn = HCI_AUTO_CONN_ALWAYS;
5125 auto_conn = HCI_AUTO_CONN_REPORT;
5127 /* If the connection parameters don't exist for this device,
5128 * they will be created and configured with defaults.
5130 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5132 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5134 &cp->addr, sizeof(cp->addr));
/* Notify other mgmt listeners about the new whitelist entry. */
5138 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5140 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5141 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5144 hci_dev_unlock(hdev);
/* Emit a MGMT_EV_DEVICE_REMOVED event to all mgmt sockets except sk. */
5148 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5149 bdaddr_t *bdaddr, u8 type)
5151 struct mgmt_ev_device_removed ev;
5153 bacpy(&ev.addr.bdaddr, bdaddr);
5154 ev.addr.type = type;
5156 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE: remove one auto-connect whitelist
 * entry (specific address), or — with BDADDR_ANY — clear every
 * user-enabled entry at once.
 * NOTE(review): braces, hci_dev_lock(), declarations (err, addr_type),
 * gotos and the final return are elided in this excerpt. Also note the
 * "¶ms" token at original line 5202 — this is mojibake for "&params"
 * ("&para" mis-decoded as the pilcrow entity); the real code reads
 * list_del(&params->list). Left untouched here per byte-identical rule.
 */
5159 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5160 void *data, u16 len)
5162 struct mgmt_cp_remove_device *cp = data;
5165 BT_DBG("%s", hdev->name);
/* Specific-address path: validate, look up and delete one entry. */
5169 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5170 struct hci_conn_params *params;
5173 if (!bdaddr_type_is_le(cp->addr.type)) {
5174 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5175 MGMT_STATUS_INVALID_PARAMS,
5176 &cp->addr, sizeof(cp->addr));
5180 if (cp->addr.type == BDADDR_LE_PUBLIC)
5181 addr_type = ADDR_LE_DEV_PUBLIC;
5183 addr_type = ADDR_LE_DEV_RANDOM;
5185 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5188 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5189 MGMT_STATUS_INVALID_PARAMS,
5190 &cp->addr, sizeof(cp->addr));
/* Entries that were never enabled via Add Device can't be removed
 * through this command. */
5194 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5195 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5196 MGMT_STATUS_INVALID_PARAMS,
5197 &cp->addr, sizeof(cp->addr));
5201 hci_pend_le_conn_del(hdev, &cp->addr.bdaddr, addr_type);
5202 list_del(¶ms->list);
5205 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY path: only address type 0 is valid for a wildcard. */
5207 if (cp->addr.type) {
5208 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5209 MGMT_STATUS_INVALID_PARAMS,
5210 &cp->addr, sizeof(cp->addr));
5214 hci_conn_params_clear_enabled(hdev);
5217 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5218 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5221 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection
 * parameters (interval/latency/timeout) supplied by user space.
 * Invalid entries are skipped with an error log rather than failing the
 * whole command.
 * NOTE(review): braces, locals (i, addr_type), hci_dev_lock() and
 * continue statements are elided in this excerpt. The "¶m" tokens at
 * original lines 5267 and 5292 are mojibake for "&param" (the "&para"
 * prefix mis-decoded as a pilcrow); left untouched per byte-identical
 * rule.
 */
5225 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5228 struct mgmt_cp_load_conn_param *cp = data;
/* Cap param_count so expected_len below cannot overflow u16. */
5229 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5230 sizeof(struct mgmt_conn_param));
5231 u16 param_count, expected_len;
5234 if (!lmp_le_capable(hdev))
5235 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5236 MGMT_STATUS_NOT_SUPPORTED);
5238 param_count = __le16_to_cpu(cp->param_count);
5239 if (param_count > max_param_count) {
5240 BT_ERR("load_conn_param: too big param_count value %u",
5242 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5243 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must match the actual payload length exactly. */
5246 expected_len = sizeof(*cp) + param_count *
5247 sizeof(struct mgmt_conn_param);
5248 if (expected_len != len) {
5249 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5251 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5252 MGMT_STATUS_INVALID_PARAMS);
5255 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Replace previously-loaded (disabled) entries wholesale. */
5259 hci_conn_params_clear_disabled(hdev);
5261 for (i = 0; i < param_count; i++) {
5262 struct mgmt_conn_param *param = &cp->params[i];
5263 struct hci_conn_params *hci_param;
5264 u16 min, max, latency, timeout;
5267 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
5270 if (param->addr.type == BDADDR_LE_PUBLIC) {
5271 addr_type = ADDR_LE_DEV_PUBLIC;
5272 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5273 addr_type = ADDR_LE_DEV_RANDOM;
5275 BT_ERR("Ignoring invalid connection parameters");
5279 min = le16_to_cpu(param->min_interval);
5280 max = le16_to_cpu(param->max_interval);
5281 latency = le16_to_cpu(param->latency);
5282 timeout = le16_to_cpu(param->timeout);
5284 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5285 min, max, latency, timeout);
/* Range-check against the Core Spec limits before storing. */
5287 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5288 BT_ERR("Ignoring invalid connection parameters");
5292 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
5295 BT_ERR("Failed to add connection parameters");
5299 hci_param->conn_min_interval = min;
5300 hci_param->conn_max_interval = max;
5301 hci_param->conn_latency = latency;
5302 hci_param->supervision_timeout = timeout;
5305 hci_dev_unlock(hdev);
5307 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Dispatch table for mgmt commands, indexed by opcode. Each entry gives
 * the handler, whether the payload is variable-length (true = payload may
 * exceed the stated minimum size), and the expected/minimum payload size.
 * NOTE(review): struct members between func and the size field (the
 * var_len flag and data_len, referenced by mgmt_control below) are elided
 * from this excerpt.
 */
5310 static const struct mgmt_handler {
5311 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5315 } mgmt_handlers[] = {
5316 { NULL }, /* 0x0000 (no command) */
5317 { read_version, false, MGMT_READ_VERSION_SIZE },
5318 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5319 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5320 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5321 { set_powered, false, MGMT_SETTING_SIZE },
5322 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5323 { set_connectable, false, MGMT_SETTING_SIZE },
5324 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5325 { set_pairable, false, MGMT_SETTING_SIZE },
5326 { set_link_security, false, MGMT_SETTING_SIZE },
5327 { set_ssp, false, MGMT_SETTING_SIZE },
5328 { set_hs, false, MGMT_SETTING_SIZE },
5329 { set_le, false, MGMT_SETTING_SIZE },
5330 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5331 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5332 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5333 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5334 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5335 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5336 { disconnect, false, MGMT_DISCONNECT_SIZE },
5337 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5338 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5339 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5340 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5341 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5342 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5343 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5344 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5345 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5346 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5347 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5348 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5349 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5350 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5351 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5352 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5353 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5354 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5355 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5356 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5357 { set_advertising, false, MGMT_SETTING_SIZE },
5358 { set_bredr, false, MGMT_SETTING_SIZE },
5359 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5360 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5361 { set_secure_conn, false, MGMT_SETTING_SIZE },
5362 { set_debug_keys, false, MGMT_SETTING_SIZE },
5363 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5364 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5365 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5366 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5367 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5368 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5369 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5370 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
/* Entry point for mgmt messages arriving on an HCI control socket:
 * copy the message from user space, validate header/index/opcode/length,
 * then dispatch to the matching mgmt_handlers[] entry.
 * NOTE(review): elided in this excerpt — braces, locals (buf, cp, err),
 * several error gotos, the done label with kfree(buf)/hci_dev_put(), and
 * the return statement.
 */
5373 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5377 struct mgmt_hdr *hdr;
5378 u16 opcode, index, len;
5379 struct hci_dev *hdev = NULL;
5380 const struct mgmt_handler *handler;
5383 BT_DBG("got %zu bytes", msglen);
/* The message must at least contain a full mgmt header. */
5385 if (msglen < sizeof(*hdr))
5388 buf = kmalloc(msglen, GFP_KERNEL);
5392 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5398 opcode = __le16_to_cpu(hdr->opcode);
5399 index = __le16_to_cpu(hdr->index);
5400 len = __le16_to_cpu(hdr->len);
/* Header length field must agree with the bytes actually received. */
5402 if (len != msglen - sizeof(*hdr)) {
5407 if (index != MGMT_INDEX_NONE) {
5408 hdev = hci_dev_get(index);
5410 err = cmd_status(sk, index, opcode,
5411 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, unconfigured, or claimed by a user
 * channel are not addressable via mgmt. */
5415 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5416 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
5417 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5418 err = cmd_status(sk, index, opcode,
5419 MGMT_STATUS_INVALID_INDEX);
5424 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5425 mgmt_handlers[opcode].func == NULL) {
5426 BT_DBG("Unknown op %u", opcode);
5427 err = cmd_status(sk, index, opcode,
5428 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands must NOT carry a controller index... */
5432 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5433 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5434 err = cmd_status(sk, index, opcode,
5435 MGMT_STATUS_INVALID_INDEX);
/* ...and per-controller commands MUST carry one. */
5439 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5440 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5441 err = cmd_status(sk, index, opcode,
5442 MGMT_STATUS_INVALID_INDEX);
5446 handler = &mgmt_handlers[opcode];
/* Variable-length commands enforce a minimum size, fixed ones an
 * exact size. */
5448 if ((handler->var_len && len < handler->data_len) ||
5449 (!handler->var_len && len != handler->data_len)) {
5450 err = cmd_status(sk, index, opcode,
5451 MGMT_STATUS_INVALID_PARAMS);
5456 mgmt_init_hdev(sk, hdev);
5458 cp = buf + sizeof(*hdr);
5460 err = handler->func(sk, hdev, cp, len);
/* Broadcast an Index Added (or Unconf Index Added for unconfigured
 * controllers) event when a BR/EDR controller is registered. Raw-only
 * devices are invisible to mgmt.
 * NOTE(review): braces, returns and the else between the two
 * mgmt_event() calls are elided in this excerpt.
 */
5474 void mgmt_index_added(struct hci_dev *hdev)
5476 if (hdev->dev_type != HCI_BREDR)
5479 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5482 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5483 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5485 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Broadcast an Index Removed event when a BR/EDR controller disappears,
 * after failing every still-pending mgmt command with Invalid Index.
 * NOTE(review): braces/returns and the else between the two mgmt_event()
 * calls are elided in this excerpt.
 */
5488 void mgmt_index_removed(struct hci_dev *hdev)
5490 u8 status = MGMT_STATUS_INVALID_INDEX;
5492 if (hdev->dev_type != HCI_BREDR)
5495 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* opcode 0 = match every pending command, regardless of opcode. */
5498 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5500 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5501 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5503 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5506 /* This function requires the caller holds hdev->lock */
/* Re-arm pending LE auto-connections after power-on: every stored
 * connection-parameter entry marked HCI_AUTO_CONN_ALWAYS is put back on
 * the pending-connection list.
 * NOTE(review): braces and a local flag (presumably tracking whether any
 * entry was added, guarding the final scan update) are elided here.
 */
5507 static void restart_le_auto_conns(struct hci_dev *hdev)
5509 struct hci_conn_params *p;
5512 list_for_each_entry(p, &hdev->le_conn_params, list) {
5513 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
5514 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
5519 /* Calling hci_pend_le_conn_add will actually already trigger
5520 * background scanning when needed. So no need to trigger it
5521 * just another time.
5523 * This check is here to avoid an unneeded restart of the
5524 * passive scanning. Since this is during the controller
5525 * power up phase the duplicate filtering is not an issue.
5530 hci_update_background_scan(hdev);
/* HCI request completion callback after the power-on init sequence built
 * by powered_update_hci(): restart LE auto-connections, answer pending
 * Set Powered commands and broadcast the new settings.
 * NOTE(review): braces and the hci_dev_lock() call are elided here.
 */
5533 static void powered_complete(struct hci_dev *hdev, u8 status)
5535 struct cmd_lookup match = { NULL, hdev };
5537 BT_DBG("status 0x%02x", status);
5541 restart_le_auto_conns(hdev);
5543 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5545 new_settings(hdev, match.sk);
5547 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings controller state in line
 * with the mgmt settings after power-on: SSP mode, LE host support,
 * advertising data, authentication and scan enable.
 * Returns the result of hci_req_run() (0 when the request was queued).
 * NOTE(review): braces, locals (ssp, link_sec), an update_class/update_eir
 * section for BR/EDR and several closing braces are elided here.
 */
5553 static int powered_update_hci(struct hci_dev *hdev)
5555 struct hci_request req;
5558 hci_req_init(&req, hdev);
/* Turn on SSP in the controller if mgmt wants it but the host feature
 * bit is not yet set. */
5560 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5561 !lmp_host_ssp_capable(hdev)) {
5564 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5567 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5568 lmp_bredr_capable(hdev)) {
5569 struct hci_cp_write_le_host_supported cp;
5572 cp.simul = lmp_le_br_capable(hdev);
5574 /* Check first if we already have the right
5575 * host state (host features set)
5577 if (cp.le != lmp_host_le_capable(hdev) ||
5578 cp.simul != lmp_host_le_br_capable(hdev))
5579 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5583 if (lmp_le_capable(hdev)) {
5584 /* Make sure the controller has a good default for
5585 * advertising data. This also applies to the case
5586 * where BR/EDR was toggled during the AUTO_OFF phase.
5588 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5589 update_adv_data(&req);
5590 update_scan_rsp_data(&req);
5593 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5594 enable_advertising(&req);
/* Sync the controller's auth-enable setting with the mgmt link
 * security flag. */
5597 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5598 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5599 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5600 sizeof(link_sec), &link_sec);
5602 if (lmp_bredr_capable(hdev)) {
5603 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5604 set_bredr_scan(&req);
5610 return hci_req_run(&req, powered_complete);
/* Notify the mgmt layer about a controller power state change.
 * Power-on kicks off the powered_update_hci() request (replies are then
 * sent from powered_complete()); power-off immediately fails all pending
 * commands and reports a zeroed class of device.
 * NOTE(review): braces, the powered if/else split, err declaration and
 * the return are elided in this excerpt.
 */
5613 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5615 struct cmd_lookup match = { NULL, hdev };
5616 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5617 u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to do if mgmt was never initialized for this controller. */
5620 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-on: if the init request was queued, the completion callback
 * handles the replies; returning here defers them. */
5624 if (powered_update_hci(hdev) == 0)
5627 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path: answer Set Powered, then fail everything else. */
5632 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5633 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5635 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5636 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5637 zero_cod, sizeof(zero_cod), NULL);
5640 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command when powering the adapter failed,
 * mapping -ERFKILL to the dedicated RFKILLED mgmt status.
 * NOTE(review): braces, the u8 status declaration, the early return when
 * no command is pending, and the else keyword are elided here.
 */
5648 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5650 struct pending_cmd *cmd;
5653 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5657 if (err == -ERFKILL)
5658 status = MGMT_STATUS_RFKILLED;
5660 status = MGMT_STATUS_FAILED;
5662 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5664 mgmt_pending_remove(cmd);
/* Discoverable timeout expiry: drop the discoverable flags, restore
 * page-scan-only mode on BR/EDR, refresh advertising data and broadcast
 * the new settings.
 * NOTE(review): braces and hci_dev_lock() are elided in this excerpt.
 */
5667 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5669 struct hci_request req;
5673 /* When discoverable timeout triggers, then just make sure
5674 * the limited discoverable flag is cleared. Even in the case
5675 * of a timeout triggered from general discoverable, it is
5676 * safe to unconditionally clear the flag.
5678 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5679 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5681 hci_req_init(&req, hdev);
5682 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Keep page scan, drop inquiry scan. */
5683 u8 scan = SCAN_PAGE;
5684 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5685 sizeof(scan), &scan);
/* Advertising flags may include "discoverable"; regenerate them. */
5688 update_adv_data(&req);
5689 hci_req_run(&req, NULL);
5691 hdev->discov_timeout = 0;
5693 new_settings(hdev, NULL);
5695 hci_dev_unlock(hdev);
/* Track a scan-mode-driven change of the discoverable state and, when it
 * actually changed, refresh advertising data and broadcast New Settings.
 * NOTE(review): braces, the bool changed declaration, returns, and the
 * if/else around the set/clear pair are elided in this excerpt.
 */
5698 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5702 /* Nothing needed here if there's a pending command since that
5703 * commands request completion callback takes care of everything
5706 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5709 /* Powering off may clear the scan mode - don't let that interfere */
5710 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5714 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
/* Disable path: limited discoverable goes away with discoverable. */
5716 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5717 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5721 struct hci_request req;
5723 /* In case this change in discoverable was triggered by
5724 * a disabling of connectable there could be a need to
5725 * update the advertising flags.
5727 hci_req_init(&req, hdev);
5728 update_adv_data(&req);
5729 hci_req_run(&req, NULL);
5731 new_settings(hdev, NULL);
/* Track a scan-mode-driven change of the connectable state and broadcast
 * New Settings when the flag actually flipped.
 * NOTE(review): braces, the bool changed declaration, returns, and the
 * if/else around the set/clear pair are elided in this excerpt.
 */
5735 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5739 /* Nothing needed here if there's a pending command since that
5740 * commands request completion callback takes care of everything
5743 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5746 /* Powering off may clear the scan mode - don't let that interfere */
5747 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5751 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5753 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5756 new_settings(hdev, NULL);
/* Mirror the controller's advertising on/off state into the mgmt
 * HCI_ADVERTISING flag, ignoring transitions caused by power-off.
 * NOTE(review): braces, a return, and the if/else around the set/clear
 * pair are elided in this excerpt.
 */
5759 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5761 /* Powering off may stop advertising - don't let that interfere */
5762 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5766 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5768 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* A Write Scan Enable HCI command failed: propagate the mapped mgmt
 * error to whichever pending command(s) requested that scan bit —
 * page scan maps to Set Connectable, inquiry scan to Set Discoverable.
 */
5771 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5773 u8 mgmt_err = mgmt_status(status);
5775 if (scan & SCAN_PAGE)
5776 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5777 cmd_status_rsp, &mgmt_err);
5779 if (scan & SCAN_INQUIRY)
5780 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5781 cmd_status_rsp, &mgmt_err);
/* Broadcast a New Link Key event so user space can (optionally, per
 * store_hint) persist a BR/EDR link key created during pairing.
 * NOTE(review): braces and the persistent parameter declaration are
 * elided in this excerpt.
 */
5784 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5787 struct mgmt_ev_new_link_key ev;
5789 memset(&ev, 0, sizeof(ev));
5791 ev.store_hint = persistent;
5792 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5793 ev.key.addr.type = BDADDR_BREDR;
5794 ev.key.type = key->type;
5795 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5796 ev.key.pin_len = key->pin_len;
5798 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's authenticated flag to the mgmt LTK type constant. */
5801 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5803 if (ltk->authenticated)
5804 return MGMT_LTK_AUTHENTICATED;
5806 return MGMT_LTK_UNAUTHENTICATED;
/* Broadcast a New Long Term Key event so user space can persist an LTK
 * negotiated via SMP. Keys bound to non-identity (resolvable/
 * non-resolvable) random addresses get store_hint 0 since the address
 * will not be stable across reconnections.
 * NOTE(review): braces, the else before the persistent assignment, and
 * the if/else bodies around the key->type check (presumably selecting
 * how much of val is copied) are elided in this excerpt.
 */
5809 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5811 struct mgmt_ev_new_long_term_key ev;
5813 memset(&ev, 0, sizeof(ev));
5815 /* Devices using resolvable or non-resolvable random addresses
5816 * without providing an indentity resolving key don't require
5817 * to store long term keys. Their addresses will change the
5820 * Only when a remote device provides an identity address
5821 * make sure the long term key is stored. If the remote
5822 * identity is known, the long term keys are internally
5823 * mapped to the identity address. So allow static random
5824 * and public addresses here.
5826 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5827 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5828 ev.store_hint = 0x00;
5830 ev.store_hint = persistent;
5832 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5833 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5834 ev.key.type = mgmt_ltk_type(key);
5835 ev.key.enc_size = key->enc_size;
5836 ev.key.ediv = key->ediv;
5837 ev.key.rand = key->rand;
5839 if (key->type == SMP_LTK)
5842 memcpy(ev.key.val, key->val, sizeof(key->val));
5844 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a New IRK event. Only keys for devices actually seen using a
 * resolvable private address (irk->rpa set) get store_hint 1 — storing
 * IRKs for identity-addressed devices would just slow down resolving.
 * NOTE(review): braces and the else keyword are elided in this excerpt.
 */
5847 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5849 struct mgmt_ev_new_irk ev;
5851 memset(&ev, 0, sizeof(ev));
5853 /* For identity resolving keys from devices that are already
5854 * using a public address or static random address, do not
5855 * ask for storing this key. The identity resolving key really
5856 * is only mandatory for devices using resovlable random
5859 * Storing all identity resolving keys has the downside that
5860 * they will be also loaded on next boot of they system. More
5861 * identity resolving keys, means more time during scanning is
5862 * needed to actually resolve these addresses.
5864 if (bacmp(&irk->rpa, BDADDR_ANY))
5865 ev.store_hint = 0x01;
5867 ev.store_hint = 0x00;
5869 bacpy(&ev.rpa, &irk->rpa);
5870 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5871 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5872 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5874 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a New CSRK (signature resolving key) event. Like LTKs, keys
 * bound to non-identity random addresses get store_hint 0.
 * NOTE(review): braces, the persistent parameter declaration and the
 * else keyword are elided in this excerpt.
 */
5877 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5880 struct mgmt_ev_new_csrk ev;
5882 memset(&ev, 0, sizeof(ev));
5884 /* Devices using resolvable or non-resolvable random addresses
5885 * without providing an indentity resolving key don't require
5886 * to store signature resolving keys. Their addresses will change
5887 * the next time around.
5889 * Only when a remote device provides an identity address
5890 * make sure the signature resolving key is stored. So allow
5891 * static random and public addresses here.
5893 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5894 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5895 ev.store_hint = 0x00;
5897 ev.store_hint = persistent;
5899 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5900 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5901 ev.key.master = csrk->master;
5902 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5904 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM with freshly negotiated LE connection
 * parameters. Only identity addresses are reported (non-identity
 * addresses change over time, so storing parameters for them is
 * pointless); all multi-byte fields are little-endian on the wire.
 */
5907 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5908 u8 bdaddr_type, u8 store_hint, u16 min_interval,
5909 u16 max_interval, u16 latency, u16 timeout)
5911 struct mgmt_ev_new_conn_param ev;
/* Bail out early for non-identity addresses (return elided in extract) */
5913 if (!hci_is_identity_address(bdaddr, bdaddr_type))
5916 memset(&ev, 0, sizeof(ev));
5917 bacpy(&ev.addr.bdaddr, bdaddr);
5918 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5919 ev.store_hint = store_hint;
5920 ev.min_interval = cpu_to_le16(min_interval);
5921 ev.max_interval = cpu_to_le16(max_interval);
5922 ev.latency = cpu_to_le16(latency);
5923 ev.timeout = cpu_to_le16(timeout);
5925 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one length/type/value field to an EIR buffer at offset eir_len
 * and return the new length. The length octet covers the type octet plus
 * the payload, per the EIR/AD data format. Caller must guarantee the
 * buffer has room for data_len + 2 additional bytes.
 */
5928 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5931 eir[eir_len++] = sizeof(type) + data_len;
5932 eir[eir_len++] = type;
5933 memcpy(&eir[eir_len], data, data_len);
5934 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED for a new connection, packing the remote
 * name (if known) and class of device into the event's EIR blob.
 */
5939 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5940 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5944 struct mgmt_ev_device_connected *ev = (void *) buf;
5947 bacpy(&ev->addr.bdaddr, bdaddr);
5948 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5950 ev->flags = __cpu_to_le32(flags);
5953 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append CoD when it is non-zero (all-zero means "not set") */
5956 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5957 eir_len = eir_append_data(ev->eir, eir_len,
5958 EIR_CLASS_OF_DEV, dev_class, 3);
5960 ev->eir_len = cpu_to_le16(eir_len);
5962 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5963 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * command with success and remove it. data points to a struct sock *
 * slot used by the caller to collect the requesting socket.
 */
5966 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5968 struct mgmt_cp_disconnect *cp = cmd->param;
5969 struct sock **sk = data;
5970 struct mgmt_rp_disconnect rp;
5972 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5973 rp.addr.type = cp->addr.type;
5975 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5981 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending MGMT_OP_UNPAIR_DEVICE
 * command — emit the Device Unpaired event, send the command-complete
 * response and drop the pending entry. data is the hci_dev.
 */
5984 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5986 struct hci_dev *hdev = data;
5987 struct mgmt_cp_unpair_device *cp = cmd->param;
5988 struct mgmt_rp_unpair_device rp;
5990 memset(&rp, 0, sizeof(rp));
5991 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5992 rp.addr.type = cp->addr.type;
5994 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5996 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5998 mgmt_pending_remove(cmd);
/* Handle a disconnection: if a power-off is pending and this was the last
 * connection, expedite the power-off work; otherwise emit
 * MGMT_EV_DEVICE_DISCONNECTED (only for ACL/LE links and only when the
 * connection was mgmt-visible) and answer outstanding DISCONNECT /
 * UNPAIR_DEVICE commands.
 */
6001 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6002 u8 link_type, u8 addr_type, u8 reason,
6003 bool mgmt_connected)
6005 struct mgmt_ev_device_disconnected ev;
6006 struct pending_cmd *power_off;
6007 struct sock *sk = NULL;
6009 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6011 struct mgmt_mode *cp = power_off->param;
6013 /* The connection is still in hci_conn_hash so test for 1
6014 * instead of 0 to know if this is the last one.
6016 if (!cp->val && hci_conn_count(hdev) == 1) {
6017 cancel_delayed_work(&hdev->power_off);
6018 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Connections user space never saw generate no disconnect event */
6022 if (!mgmt_connected)
6025 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Collect the socket of any pending DISCONNECT so the event can be
 * skipped for (not echoed back to) its originator.
 */
6028 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6030 bacpy(&ev.addr.bdaddr, bdaddr);
6031 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6034 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6039 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: flush pending UNPAIR_DEVICE commands and,
 * if the failure matches the outstanding MGMT_OP_DISCONNECT command's
 * address, complete that command with the mapped HCI status.
 */
6043 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6044 u8 link_type, u8 addr_type, u8 status)
6046 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6047 struct mgmt_cp_disconnect *cp;
6048 struct mgmt_rp_disconnect rp;
6049 struct pending_cmd *cmd;
6051 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6054 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Ignore failures for a different device than the one being commanded */
6060 if (bacmp(bdaddr, &cp->addr.bdaddr))
6063 if (cp->addr.type != bdaddr_type)
6066 bacpy(&rp.addr.bdaddr, bdaddr);
6067 rp.addr.type = bdaddr_type;
6069 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6070 mgmt_status(status), &rp, sizeof(rp));
6072 mgmt_pending_remove(cmd);
/* Report a failed connection attempt via MGMT_EV_CONNECT_FAILED. As in
 * mgmt_device_disconnected(), a pending power-off is expedited when this
 * was the last connection keeping the adapter busy.
 */
6075 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6076 u8 addr_type, u8 status)
6078 struct mgmt_ev_connect_failed ev;
6079 struct pending_cmd *power_off;
6081 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6083 struct mgmt_mode *cp = power_off->param;
6085 /* The connection is still in hci_conn_hash so test for 1
6086 * instead of 0 to know if this is the last one.
6088 if (!cp->val && hci_conn_count(hdev) == 1) {
6089 cancel_delayed_work(&hdev->power_off);
6090 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6094 bacpy(&ev.addr.bdaddr, bdaddr);
6095 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6096 ev.status = mgmt_status(status);
6098 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Ask user space for a PIN code (MGMT_EV_PIN_CODE_REQUEST). PIN pairing
 * is BR/EDR only, hence the fixed BDADDR_BREDR address type.
 */
6101 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6103 struct mgmt_ev_pin_code_request ev;
6105 bacpy(&ev.addr.bdaddr, bdaddr);
6106 ev.addr.type = BDADDR_BREDR;
6109 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* HCI completed the PIN code reply: finish the matching pending
 * MGMT_OP_PIN_CODE_REPLY command with the translated status.
 */
6112 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6115 struct pending_cmd *cmd;
6116 struct mgmt_rp_pin_code_reply rp;
6118 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6122 bacpy(&rp.addr.bdaddr, bdaddr);
6123 rp.addr.type = BDADDR_BREDR;
6125 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6126 mgmt_status(status), &rp, sizeof(rp));
6128 mgmt_pending_remove(cmd);
/* HCI completed the negative PIN code reply: finish the matching pending
 * MGMT_OP_PIN_CODE_NEG_REPLY command with the translated status.
 */
6131 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6134 struct pending_cmd *cmd;
6135 struct mgmt_rp_pin_code_reply rp;
6137 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6141 bacpy(&rp.addr.bdaddr, bdaddr);
6142 rp.addr.type = BDADDR_BREDR;
6144 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6145 mgmt_status(status), &rp, sizeof(rp));
6147 mgmt_pending_remove(cmd);
/* Ask user space to confirm a numeric comparison value
 * (MGMT_EV_USER_CONFIRM_REQUEST). Returns the mgmt_event() result.
 */
6150 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6151 u8 link_type, u8 addr_type, u32 value,
6154 struct mgmt_ev_user_confirm_request ev;
6156 BT_DBG("%s", hdev->name);
6158 bacpy(&ev.addr.bdaddr, bdaddr);
6159 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6160 ev.confirm_hint = confirm_hint;
6161 ev.value = cpu_to_le32(value);
6163 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask user space to enter a passkey (MGMT_EV_USER_PASSKEY_REQUEST).
 * Returns the mgmt_event() result.
 */
6167 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6168 u8 link_type, u8 addr_type)
6170 struct mgmt_ev_user_passkey_request ev;
6172 BT_DBG("%s", hdev->name);
6174 bacpy(&ev.addr.bdaddr, bdaddr);
6175 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6177 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common helper for completing the various user confirm/passkey
 * (neg-)reply commands: find the pending command for @opcode, send the
 * command-complete with the translated status and remove the entry.
 */
6181 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6182 u8 link_type, u8 addr_type, u8 status,
6185 struct pending_cmd *cmd;
6186 struct mgmt_rp_user_confirm_reply rp;
6189 cmd = mgmt_pending_find(opcode, hdev);
6193 bacpy(&rp.addr.bdaddr, bdaddr);
6194 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6195 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6198 mgmt_pending_remove(cmd);
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY command. */
6203 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6204 u8 link_type, u8 addr_type, u8 status)
6206 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6207 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY command. */
6210 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6211 u8 link_type, u8 addr_type, u8 status)
6213 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6215 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY command. */
6218 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6219 u8 link_type, u8 addr_type, u8 status)
6221 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6222 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY command. */
6225 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6226 u8 link_type, u8 addr_type, u8 status)
6228 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6230 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Show the local passkey to user space (MGMT_EV_PASSKEY_NOTIFY) so it can
 * be displayed; @entered counts digits typed so far on the remote side.
 * Returns the mgmt_event() result.
 */
6233 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6234 u8 link_type, u8 addr_type, u32 passkey,
6237 struct mgmt_ev_passkey_notify ev;
6239 BT_DBG("%s", hdev->name);
6241 bacpy(&ev.addr.bdaddr, bdaddr);
6242 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6243 ev.passkey = __cpu_to_le32(passkey);
6244 ev.entered = entered;
6246 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure (MGMT_EV_AUTH_FAILED) with the HCI
 * status mapped to a mgmt status code.
 */
6249 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6250 u8 addr_type, u8 status)
6252 struct mgmt_ev_auth_failed ev;
6254 bacpy(&ev.addr.bdaddr, bdaddr);
6255 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6256 ev.status = mgmt_status(status);
6258 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* HCI finished toggling authentication: on error, fail all pending
 * MGMT_OP_SET_LINK_SECURITY commands; on success, sync the
 * HCI_LINK_SECURITY dev_flag with the controller's HCI_AUTH state,
 * answer the pending commands and broadcast New Settings if changed.
 */
6261 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6263 struct cmd_lookup match = { NULL, hdev };
6267 u8 mgmt_err = mgmt_status(status);
6268 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6269 cmd_status_rsp, &mgmt_err);
6273 if (test_bit(HCI_AUTH, &hdev->flags))
6274 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6277 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6280 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6284 new_settings(hdev, match.sk);
/* Queue a Write Extended Inquiry Response command that clears the EIR
 * data, and wipe the cached copy in hdev->eir. No-op if the controller
 * lacks extended inquiry response support.
 */
6290 static void clear_eir(struct hci_request *req)
6292 struct hci_dev *hdev = req->hdev;
6293 struct hci_cp_write_eir cp;
6295 if (!lmp_ext_inq_capable(hdev))
6298 memset(hdev->eir, 0, sizeof(hdev->eir));
6300 memset(&cp, 0, sizeof(cp));
6302 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* HCI finished toggling Simple Pairing mode. On error, roll back the
 * optimistically-set HCI_SSP_ENABLED flag (and dependent HS flag) and
 * fail pending MGMT_OP_SET_SSP commands. On success, sync the dev_flags,
 * answer pending commands, broadcast New Settings when something changed
 * and update SSP debug mode / EIR state accordingly.
 */
6305 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6307 struct cmd_lookup match = { NULL, hdev };
6308 struct hci_request req;
6309 bool changed = false;
6312 u8 mgmt_err = mgmt_status(status);
/* Failure path: undo the flag that was set before issuing the command */
6314 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6315 &hdev->dev_flags)) {
6316 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6317 new_settings(hdev, NULL);
6320 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6326 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6328 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed requires SSP, so clear it whenever SSP goes away */
6330 changed = test_and_clear_bit(HCI_HS_ENABLED,
6333 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6336 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6339 new_settings(hdev, match.sk);
6344 hci_req_init(&req, hdev);
6346 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6347 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6348 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6349 sizeof(enable), &enable);
6355 hci_req_run(&req, NULL);
/* HCI finished toggling Secure Connections support. On error, roll back
 * HCI_SC_ENABLED/HCI_SC_ONLY and fail pending MGMT_OP_SET_SECURE_CONN
 * commands; on success, sync the flags, answer the pending commands and
 * broadcast New Settings when the state changed.
 */
6358 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6360 struct cmd_lookup match = { NULL, hdev };
6361 bool changed = false;
6364 u8 mgmt_err = mgmt_status(status);
6367 if (test_and_clear_bit(HCI_SC_ENABLED,
6369 new_settings(hdev, NULL);
/* SC-only mode cannot survive without SC itself */
6370 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6373 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6374 cmd_status_rsp, &mgmt_err);
6379 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6381 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6382 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6385 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6386 settings_rsp, &match);
6389 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in the cmd_lookup, taking an extra reference on it. The caller
 * is responsible for dropping that reference.
 */
6395 static void sk_lookup(struct pending_cmd *cmd, void *data)
6397 struct cmd_lookup *match = data;
6399 if (match->sk == NULL) {
6400 match->sk = cmd->sk;
6401 sock_hold(match->sk);
/* Class-of-Device write finished: find the socket of whichever command
 * triggered it (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) so the resulting
 * MGMT_EV_CLASS_OF_DEV_CHANGED event can skip echoing to the originator.
 */
6405 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6408 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6410 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6411 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6412 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6415 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name write finished: update the cached name and broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED — unless the write is part of the power-on
 * sequence, in which case no event is sent.
 */
6422 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6424 struct mgmt_cp_set_local_name ev;
6425 struct pending_cmd *cmd;
6430 memset(&ev, 0, sizeof(ev));
6431 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6432 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6434 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6436 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6438 /* If this is a HCI command related to powering on the
6439 * HCI dev don't send any mgmt signals.
6441 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6445 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6446 cmd ? cmd->sk : NULL);
/* Read Local OOB Data finished: answer the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command. With Secure Connections enabled
 * and 256-bit data available, reply with the extended (192+256 bit)
 * response; otherwise reply with the legacy 192-bit response. On HCI
 * error, a command-status with the mapped error is sent instead.
 */
6449 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6450 u8 *randomizer192, u8 *hash256,
6451 u8 *randomizer256, u8 status)
6453 struct pending_cmd *cmd;
6455 BT_DBG("%s status %u", hdev->name, status);
6457 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6462 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6463 mgmt_status(status));
6465 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6466 hash256 && randomizer256) {
6467 struct mgmt_rp_read_local_oob_ext_data rp;
6469 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6470 memcpy(rp.randomizer192, randomizer192,
6471 sizeof(rp.randomizer192));
6473 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6474 memcpy(rp.randomizer256, randomizer256,
6475 sizeof(rp.randomizer256));
6477 cmd_complete(cmd->sk, hdev->id,
6478 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6481 struct mgmt_rp_read_local_oob_data rp;
6483 memcpy(rp.hash, hash192, sizeof(rp.hash));
6484 memcpy(rp.randomizer, randomizer192,
6485 sizeof(rp.randomizer));
6487 cmd_complete(cmd->sk, hdev->id,
6488 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6493 mgmt_pending_remove(cmd);
/* Report a discovered device to user space (MGMT_EV_DEVICE_FOUND).
 * Resolvable private addresses are translated to the identity address
 * when a matching IRK is known. EIR, an optional synthesized CoD field
 * and scan response data are concatenated into the event's EIR blob.
 */
6496 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6497 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6498 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6501 struct mgmt_ev_device_found *ev = (void *) buf;
6502 struct smp_irk *irk;
6505 /* Don't send events for a non-kernel initiated discovery. With
6506 * LE one exception is if we have pend_le_reports > 0 in which
6507 * case we're doing passive scanning and want these events.
6509 if (!hci_discovery_active(hdev)) {
6510 if (link_type == ACL_LINK)
6512 if (link_type == LE_LINK && !hdev->pend_le_reports)
6516 /* Make sure that the buffer is big enough. The 5 extra bytes
6517 * are for the potential CoD field.
6519 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6522 memset(buf, 0, sizeof(buf));
/* Report the identity address if this RPA resolves to a known IRK */
6524 irk = hci_get_irk(hdev, bdaddr, addr_type);
6526 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
6527 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
6529 bacpy(&ev->addr.bdaddr, bdaddr);
6530 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6534 ev->flags = cpu_to_le32(flags);
6537 memcpy(ev->eir, eir, eir_len);
/* Synthesize a CoD field only if the EIR data didn't already carry one */
6539 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6540 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6543 if (scan_rsp_len > 0)
6544 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6546 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6547 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6549 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR blob
 * carries only an EIR_NAME_COMPLETE field.
 */
6552 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6553 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6555 struct mgmt_ev_device_found *ev;
6556 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6559 ev = (struct mgmt_ev_device_found *) buf;
6561 memset(buf, 0, sizeof(buf));
6563 bacpy(&ev->addr.bdaddr, bdaddr);
6564 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6567 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6570 ev->eir_len = cpu_to_le16(eir_len);
6572 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete the pending START_DISCOVERY or
 * STOP_DISCOVERY command (returning the discovery type) and broadcast
 * MGMT_EV_DISCOVERING with the new state.
 */
6575 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6577 struct mgmt_ev_discovering ev;
6578 struct pending_cmd *cmd;
6580 BT_DBG("%s discovering %u", hdev->name, discovering);
6583 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6585 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6588 u8 type = hdev->discovery.type;
6590 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6592 mgmt_pending_remove(cmd);
6595 memset(&ev, 0, sizeof(ev));
6596 ev.type = hdev->discovery.type;
6597 ev.discovering = discovering;
6599 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Completion callback for the advertising re-enable request: if the
 * controller rejected it, drop the HCI_ADVERTISING mgmt setting and tell
 * user space via New Settings.
 */
6602 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6604 BT_DBG("%s status %u", hdev->name, status);
6606 /* Clear the advertising mgmt setting if we failed to re-enable it */
6608 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6609 new_settings(hdev, NULL);
6613 void mgmt_reenable_advertising(struct hci_dev *hdev)
6615 struct hci_request req;
6617 if (hci_conn_num(hdev, LE_LINK) > 0)
6620 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6623 hci_req_init(&req, hdev);
6624 enable_advertising(&req);
6626 /* If this fails we have no option but to let user space know
6627 * that we've disabled advertising.
6629 if (hci_req_run(&req, adv_enable_complete) < 0) {
6630 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6631 new_settings(hdev, NULL);