/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
96 static const u16 mgmt_events[] = {
97 MGMT_EV_CONTROLLER_ERROR,
99 MGMT_EV_INDEX_REMOVED,
100 MGMT_EV_NEW_SETTINGS,
101 MGMT_EV_CLASS_OF_DEV_CHANGED,
102 MGMT_EV_LOCAL_NAME_CHANGED,
103 MGMT_EV_NEW_LINK_KEY,
104 MGMT_EV_NEW_LONG_TERM_KEY,
105 MGMT_EV_DEVICE_CONNECTED,
106 MGMT_EV_DEVICE_DISCONNECTED,
107 MGMT_EV_CONNECT_FAILED,
108 MGMT_EV_PIN_CODE_REQUEST,
109 MGMT_EV_USER_CONFIRM_REQUEST,
110 MGMT_EV_USER_PASSKEY_REQUEST,
112 MGMT_EV_DEVICE_FOUND,
114 MGMT_EV_DEVICE_BLOCKED,
115 MGMT_EV_DEVICE_UNBLOCKED,
116 MGMT_EV_DEVICE_UNPAIRED,
117 MGMT_EV_PASSKEY_NOTIFY,
120 MGMT_EV_DEVICE_ADDED,
121 MGMT_EV_DEVICE_REMOVED,
122 MGMT_EV_NEW_CONN_PARAM,
123 MGMT_EV_UNCONF_INDEX_ADDED,
124 MGMT_EV_UNCONF_INDEX_REMOVED,
/* How long the service cache (EIR/class updates) stays valid */
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
/* Powered from mgmt's point of view: HCI_UP and not in the auto-off window */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
133 struct list_head list;
141 /* HCI to MGMT error code conversion table */
142 static u8 mgmt_status_table[] = {
144 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
145 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
146 MGMT_STATUS_FAILED, /* Hardware Failure */
147 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
148 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
149 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
150 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
151 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
152 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
153 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
154 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
155 MGMT_STATUS_BUSY, /* Command Disallowed */
156 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
157 MGMT_STATUS_REJECTED, /* Rejected Security */
158 MGMT_STATUS_REJECTED, /* Rejected Personal */
159 MGMT_STATUS_TIMEOUT, /* Host Timeout */
160 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
161 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
162 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
163 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
164 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
165 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
166 MGMT_STATUS_BUSY, /* Repeated Attempts */
167 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
168 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
169 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
170 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
171 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
172 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
173 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
174 MGMT_STATUS_FAILED, /* Unspecified Error */
175 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
176 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
177 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
178 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
179 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
180 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
181 MGMT_STATUS_FAILED, /* Unit Link Key Used */
182 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
183 MGMT_STATUS_TIMEOUT, /* Instant Passed */
184 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
185 MGMT_STATUS_FAILED, /* Transaction Collision */
186 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
187 MGMT_STATUS_REJECTED, /* QoS Rejected */
188 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
189 MGMT_STATUS_REJECTED, /* Insufficient Security */
190 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
191 MGMT_STATUS_BUSY, /* Role Switch Pending */
192 MGMT_STATUS_FAILED, /* Slot Violation */
193 MGMT_STATUS_FAILED, /* Role Switch Failed */
194 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
195 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
196 MGMT_STATUS_BUSY, /* Host Busy Pairing */
197 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
198 MGMT_STATUS_BUSY, /* Controller Busy */
199 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
200 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
201 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
202 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
203 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
206 static u8 mgmt_status(u8 hci_status)
208 if (hci_status < ARRAY_SIZE(mgmt_status_table))
209 return mgmt_status_table[hci_status];
211 return MGMT_STATUS_FAILED;
214 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
217 struct mgmt_hdr *hdr;
218 struct mgmt_ev_cmd_status *ev;
221 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
223 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
229 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
230 hdr->index = cpu_to_le16(index);
231 hdr->len = cpu_to_le16(sizeof(*ev));
233 ev = (void *) skb_put(skb, sizeof(*ev));
235 ev->opcode = cpu_to_le16(cmd);
237 err = sock_queue_rcv_skb(sk, skb);
244 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
245 void *rp, size_t rp_len)
248 struct mgmt_hdr *hdr;
249 struct mgmt_ev_cmd_complete *ev;
252 BT_DBG("sock %p", sk);
254 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
258 hdr = (void *) skb_put(skb, sizeof(*hdr));
260 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
261 hdr->index = cpu_to_le16(index);
262 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
264 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
265 ev->opcode = cpu_to_le16(cmd);
269 memcpy(ev->data, rp, rp_len);
271 err = sock_queue_rcv_skb(sk, skb);
278 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
281 struct mgmt_rp_read_version rp;
283 BT_DBG("sock %p", sk);
285 rp.version = MGMT_VERSION;
286 rp.revision = cpu_to_le16(MGMT_REVISION);
288 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
292 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
295 struct mgmt_rp_read_commands *rp;
296 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
297 const u16 num_events = ARRAY_SIZE(mgmt_events);
302 BT_DBG("sock %p", sk);
304 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
306 rp = kmalloc(rp_size, GFP_KERNEL);
310 rp->num_commands = cpu_to_le16(num_commands);
311 rp->num_events = cpu_to_le16(num_events);
313 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
314 put_unaligned_le16(mgmt_commands[i], opcode);
316 for (i = 0; i < num_events; i++, opcode++)
317 put_unaligned_le16(mgmt_events[i], opcode);
319 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
326 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
329 struct mgmt_rp_read_index_list *rp;
335 BT_DBG("sock %p", sk);
337 read_lock(&hci_dev_list_lock);
340 list_for_each_entry(d, &hci_dev_list, list) {
341 if (d->dev_type == HCI_BREDR &&
342 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
346 rp_len = sizeof(*rp) + (2 * count);
347 rp = kmalloc(rp_len, GFP_ATOMIC);
349 read_unlock(&hci_dev_list_lock);
354 list_for_each_entry(d, &hci_dev_list, list) {
355 if (test_bit(HCI_SETUP, &d->dev_flags) ||
356 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
359 /* Devices marked as raw-only are neither configured
360 * nor unconfigured controllers.
362 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
365 if (d->dev_type == HCI_BREDR &&
366 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
367 rp->index[count++] = cpu_to_le16(d->id);
368 BT_DBG("Added hci%u", d->id);
372 rp->num_controllers = cpu_to_le16(count);
373 rp_len = sizeof(*rp) + (2 * count);
375 read_unlock(&hci_dev_list_lock);
377 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
385 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
386 void *data, u16 data_len)
388 struct mgmt_rp_read_unconf_index_list *rp;
394 BT_DBG("sock %p", sk);
396 read_lock(&hci_dev_list_lock);
399 list_for_each_entry(d, &hci_dev_list, list) {
400 if (d->dev_type == HCI_BREDR &&
401 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
405 rp_len = sizeof(*rp) + (2 * count);
406 rp = kmalloc(rp_len, GFP_ATOMIC);
408 read_unlock(&hci_dev_list_lock);
413 list_for_each_entry(d, &hci_dev_list, list) {
414 if (test_bit(HCI_SETUP, &d->dev_flags) ||
415 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
418 /* Devices marked as raw-only are neither configured
419 * nor unconfigured controllers.
421 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
424 if (d->dev_type == HCI_BREDR &&
425 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
426 rp->index[count++] = cpu_to_le16(d->id);
427 BT_DBG("Added hci%u", d->id);
431 rp->num_controllers = cpu_to_le16(count);
432 rp_len = sizeof(*rp) + (2 * count);
434 read_unlock(&hci_dev_list_lock);
436 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
444 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
445 void *data, u16 data_len)
447 struct mgmt_rp_read_config_info rp;
449 BT_DBG("sock %p %s", sk, hdev->name);
453 memset(&rp, 0, sizeof(rp));
454 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
455 if (hdev->set_bdaddr)
456 rp.supported_options = cpu_to_le32(MGMT_OPTION_PUBLIC_ADDRESS);
458 rp.supported_options = cpu_to_le32(0);
459 rp.missing_options = cpu_to_le32(0);
461 hci_dev_unlock(hdev);
463 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
467 static u32 get_supported_settings(struct hci_dev *hdev)
471 settings |= MGMT_SETTING_POWERED;
472 settings |= MGMT_SETTING_PAIRABLE;
473 settings |= MGMT_SETTING_DEBUG_KEYS;
475 if (lmp_bredr_capable(hdev)) {
476 settings |= MGMT_SETTING_CONNECTABLE;
477 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
478 settings |= MGMT_SETTING_FAST_CONNECTABLE;
479 settings |= MGMT_SETTING_DISCOVERABLE;
480 settings |= MGMT_SETTING_BREDR;
481 settings |= MGMT_SETTING_LINK_SECURITY;
483 if (lmp_ssp_capable(hdev)) {
484 settings |= MGMT_SETTING_SSP;
485 settings |= MGMT_SETTING_HS;
488 if (lmp_sc_capable(hdev) ||
489 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
490 settings |= MGMT_SETTING_SECURE_CONN;
493 if (lmp_le_capable(hdev)) {
494 settings |= MGMT_SETTING_LE;
495 settings |= MGMT_SETTING_ADVERTISING;
496 settings |= MGMT_SETTING_PRIVACY;
499 if (hdev->set_bdaddr)
500 settings |= MGMT_SETTING_CONFIGURATION;
505 static u32 get_current_settings(struct hci_dev *hdev)
509 if (hdev_is_powered(hdev))
510 settings |= MGMT_SETTING_POWERED;
512 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
513 settings |= MGMT_SETTING_CONNECTABLE;
515 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
516 settings |= MGMT_SETTING_FAST_CONNECTABLE;
518 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
519 settings |= MGMT_SETTING_DISCOVERABLE;
521 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
522 settings |= MGMT_SETTING_PAIRABLE;
524 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
525 settings |= MGMT_SETTING_BREDR;
527 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
528 settings |= MGMT_SETTING_LE;
530 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
531 settings |= MGMT_SETTING_LINK_SECURITY;
533 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
534 settings |= MGMT_SETTING_SSP;
536 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
537 settings |= MGMT_SETTING_HS;
539 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
540 settings |= MGMT_SETTING_ADVERTISING;
542 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
543 settings |= MGMT_SETTING_SECURE_CONN;
545 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
546 settings |= MGMT_SETTING_DEBUG_KEYS;
548 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
549 settings |= MGMT_SETTING_PRIVACY;
554 #define PNP_INFO_SVCLASS_ID 0x1200
556 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
558 u8 *ptr = data, *uuids_start = NULL;
559 struct bt_uuid *uuid;
564 list_for_each_entry(uuid, &hdev->uuids, list) {
567 if (uuid->size != 16)
570 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
574 if (uuid16 == PNP_INFO_SVCLASS_ID)
580 uuids_start[1] = EIR_UUID16_ALL;
584 /* Stop if not enough space to put next UUID */
585 if ((ptr - data) + sizeof(u16) > len) {
586 uuids_start[1] = EIR_UUID16_SOME;
590 *ptr++ = (uuid16 & 0x00ff);
591 *ptr++ = (uuid16 & 0xff00) >> 8;
592 uuids_start[0] += sizeof(uuid16);
598 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
600 u8 *ptr = data, *uuids_start = NULL;
601 struct bt_uuid *uuid;
606 list_for_each_entry(uuid, &hdev->uuids, list) {
607 if (uuid->size != 32)
613 uuids_start[1] = EIR_UUID32_ALL;
617 /* Stop if not enough space to put next UUID */
618 if ((ptr - data) + sizeof(u32) > len) {
619 uuids_start[1] = EIR_UUID32_SOME;
623 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
625 uuids_start[0] += sizeof(u32);
631 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
633 u8 *ptr = data, *uuids_start = NULL;
634 struct bt_uuid *uuid;
639 list_for_each_entry(uuid, &hdev->uuids, list) {
640 if (uuid->size != 128)
646 uuids_start[1] = EIR_UUID128_ALL;
650 /* Stop if not enough space to put next UUID */
651 if ((ptr - data) + 16 > len) {
652 uuids_start[1] = EIR_UUID128_SOME;
656 memcpy(ptr, uuid->uuid, 16);
658 uuids_start[0] += 16;
664 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
666 struct pending_cmd *cmd;
668 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
669 if (cmd->opcode == opcode)
676 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
677 struct hci_dev *hdev,
680 struct pending_cmd *cmd;
682 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
683 if (cmd->user_data != data)
685 if (cmd->opcode == opcode)
692 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
697 name_len = strlen(hdev->dev_name);
699 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
701 if (name_len > max_len) {
703 ptr[1] = EIR_NAME_SHORT;
705 ptr[1] = EIR_NAME_COMPLETE;
707 ptr[0] = name_len + 1;
709 memcpy(ptr + 2, hdev->dev_name, name_len);
711 ad_len += (name_len + 2);
712 ptr += (name_len + 2);
718 static void update_scan_rsp_data(struct hci_request *req)
720 struct hci_dev *hdev = req->hdev;
721 struct hci_cp_le_set_scan_rsp_data cp;
724 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
727 memset(&cp, 0, sizeof(cp));
729 len = create_scan_rsp_data(hdev, cp.data);
731 if (hdev->scan_rsp_data_len == len &&
732 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
735 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
736 hdev->scan_rsp_data_len = len;
740 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
743 static u8 get_adv_discov_flags(struct hci_dev *hdev)
745 struct pending_cmd *cmd;
747 /* If there's a pending mgmt command the flags will not yet have
748 * their final values, so check for this first.
750 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
752 struct mgmt_mode *cp = cmd->param;
754 return LE_AD_GENERAL;
755 else if (cp->val == 0x02)
756 return LE_AD_LIMITED;
758 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
759 return LE_AD_LIMITED;
760 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
761 return LE_AD_GENERAL;
767 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
769 u8 ad_len = 0, flags = 0;
771 flags |= get_adv_discov_flags(hdev);
773 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
774 flags |= LE_AD_NO_BREDR;
777 BT_DBG("adv flags 0x%02x", flags);
787 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
789 ptr[1] = EIR_TX_POWER;
790 ptr[2] = (u8) hdev->adv_tx_power;
799 static void update_adv_data(struct hci_request *req)
801 struct hci_dev *hdev = req->hdev;
802 struct hci_cp_le_set_adv_data cp;
805 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
808 memset(&cp, 0, sizeof(cp));
810 len = create_adv_data(hdev, cp.data);
812 if (hdev->adv_data_len == len &&
813 memcmp(cp.data, hdev->adv_data, len) == 0)
816 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
817 hdev->adv_data_len = len;
821 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
824 static void create_eir(struct hci_dev *hdev, u8 *data)
829 name_len = strlen(hdev->dev_name);
835 ptr[1] = EIR_NAME_SHORT;
837 ptr[1] = EIR_NAME_COMPLETE;
839 /* EIR Data length */
840 ptr[0] = name_len + 1;
842 memcpy(ptr + 2, hdev->dev_name, name_len);
844 ptr += (name_len + 2);
847 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
849 ptr[1] = EIR_TX_POWER;
850 ptr[2] = (u8) hdev->inq_tx_power;
855 if (hdev->devid_source > 0) {
857 ptr[1] = EIR_DEVICE_ID;
859 put_unaligned_le16(hdev->devid_source, ptr + 2);
860 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
861 put_unaligned_le16(hdev->devid_product, ptr + 6);
862 put_unaligned_le16(hdev->devid_version, ptr + 8);
867 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
868 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
869 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
872 static void update_eir(struct hci_request *req)
874 struct hci_dev *hdev = req->hdev;
875 struct hci_cp_write_eir cp;
877 if (!hdev_is_powered(hdev))
880 if (!lmp_ext_inq_capable(hdev))
883 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
886 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
889 memset(&cp, 0, sizeof(cp));
891 create_eir(hdev, cp.data);
893 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
896 memcpy(hdev->eir, cp.data, sizeof(cp.data));
898 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
901 static u8 get_service_classes(struct hci_dev *hdev)
903 struct bt_uuid *uuid;
906 list_for_each_entry(uuid, &hdev->uuids, list)
907 val |= uuid->svc_hint;
912 static void update_class(struct hci_request *req)
914 struct hci_dev *hdev = req->hdev;
917 BT_DBG("%s", hdev->name);
919 if (!hdev_is_powered(hdev))
922 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
925 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
928 cod[0] = hdev->minor_class;
929 cod[1] = hdev->major_class;
930 cod[2] = get_service_classes(hdev);
932 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
935 if (memcmp(cod, hdev->dev_class, 3) == 0)
938 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
941 static bool get_connectable(struct hci_dev *hdev)
943 struct pending_cmd *cmd;
945 /* If there's a pending mgmt command the flag will not yet have
946 * it's final value, so check for this first.
948 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
950 struct mgmt_mode *cp = cmd->param;
954 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
957 static void enable_advertising(struct hci_request *req)
959 struct hci_dev *hdev = req->hdev;
960 struct hci_cp_le_set_adv_param cp;
961 u8 own_addr_type, enable = 0x01;
964 /* Clear the HCI_ADVERTISING bit temporarily so that the
965 * hci_update_random_address knows that it's safe to go ahead
966 * and write a new random address. The flag will be set back on
967 * as soon as the SET_ADV_ENABLE HCI command completes.
969 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
971 connectable = get_connectable(hdev);
973 /* Set require_privacy to true only when non-connectable
974 * advertising is used. In that case it is fine to use a
975 * non-resolvable private address.
977 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
980 memset(&cp, 0, sizeof(cp));
981 cp.min_interval = cpu_to_le16(0x0800);
982 cp.max_interval = cpu_to_le16(0x0800);
983 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
984 cp.own_address_type = own_addr_type;
985 cp.channel_map = hdev->le_adv_channel_map;
987 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
989 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
992 static void disable_advertising(struct hci_request *req)
996 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
999 static void service_cache_off(struct work_struct *work)
1001 struct hci_dev *hdev = container_of(work, struct hci_dev,
1002 service_cache.work);
1003 struct hci_request req;
1005 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1008 hci_req_init(&req, hdev);
1015 hci_dev_unlock(hdev);
1017 hci_req_run(&req, NULL);
1020 static void rpa_expired(struct work_struct *work)
1022 struct hci_dev *hdev = container_of(work, struct hci_dev,
1024 struct hci_request req;
1028 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1030 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
1031 hci_conn_num(hdev, LE_LINK) > 0)
1034 /* The generation of a new RPA and programming it into the
1035 * controller happens in the enable_advertising() function.
1038 hci_req_init(&req, hdev);
1040 disable_advertising(&req);
1041 enable_advertising(&req);
1043 hci_req_run(&req, NULL);
1046 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1048 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1051 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1052 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1054 /* Non-mgmt controlled devices get this bit set
1055 * implicitly so that pairing works for them, however
1056 * for mgmt we require user-space to explicitly enable
1059 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1062 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1063 void *data, u16 data_len)
1065 struct mgmt_rp_read_info rp;
1067 BT_DBG("sock %p %s", sk, hdev->name);
1071 memset(&rp, 0, sizeof(rp));
1073 bacpy(&rp.bdaddr, &hdev->bdaddr);
1075 rp.version = hdev->hci_ver;
1076 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1078 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1079 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1081 memcpy(rp.dev_class, hdev->dev_class, 3);
1083 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1084 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1086 hci_dev_unlock(hdev);
1088 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1092 static void mgmt_pending_free(struct pending_cmd *cmd)
1099 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1100 struct hci_dev *hdev, void *data,
1103 struct pending_cmd *cmd;
1105 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1109 cmd->opcode = opcode;
1110 cmd->index = hdev->id;
1112 cmd->param = kmalloc(len, GFP_KERNEL);
1119 memcpy(cmd->param, data, len);
1124 list_add(&cmd->list, &hdev->mgmt_pending);
1129 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1130 void (*cb)(struct pending_cmd *cmd,
1134 struct pending_cmd *cmd, *tmp;
1136 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1137 if (opcode > 0 && cmd->opcode != opcode)
1144 static void mgmt_pending_remove(struct pending_cmd *cmd)
1146 list_del(&cmd->list);
1147 mgmt_pending_free(cmd);
1150 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1152 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1154 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1158 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1160 BT_DBG("%s status 0x%02x", hdev->name, status);
1162 if (hci_conn_count(hdev) == 0) {
1163 cancel_delayed_work(&hdev->power_off);
1164 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1168 static void hci_stop_discovery(struct hci_request *req)
1170 struct hci_dev *hdev = req->hdev;
1171 struct hci_cp_remote_name_req_cancel cp;
1172 struct inquiry_entry *e;
1174 switch (hdev->discovery.state) {
1175 case DISCOVERY_FINDING:
1176 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1177 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1179 cancel_delayed_work(&hdev->le_scan_disable);
1180 hci_req_add_le_scan_disable(req);
1185 case DISCOVERY_RESOLVING:
1186 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1191 bacpy(&cp.bdaddr, &e->data.bdaddr);
1192 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1198 /* Passive scanning */
1199 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1200 hci_req_add_le_scan_disable(req);
1205 static int clean_up_hci_state(struct hci_dev *hdev)
1207 struct hci_request req;
1208 struct hci_conn *conn;
1210 hci_req_init(&req, hdev);
1212 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1213 test_bit(HCI_PSCAN, &hdev->flags)) {
1215 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1218 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1219 disable_advertising(&req);
1221 hci_stop_discovery(&req);
1223 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1224 struct hci_cp_disconnect dc;
1225 struct hci_cp_reject_conn_req rej;
1227 switch (conn->state) {
1230 dc.handle = cpu_to_le16(conn->handle);
1231 dc.reason = 0x15; /* Terminated due to Power Off */
1232 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1235 if (conn->type == LE_LINK)
1236 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1238 else if (conn->type == ACL_LINK)
1239 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1243 bacpy(&rej.bdaddr, &conn->dst);
1244 rej.reason = 0x15; /* Terminated due to Power Off */
1245 if (conn->type == ACL_LINK)
1246 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1248 else if (conn->type == SCO_LINK)
1249 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1255 return hci_req_run(&req, clean_up_hci_complete);
1258 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1261 struct mgmt_mode *cp = data;
1262 struct pending_cmd *cmd;
1265 BT_DBG("request for %s", hdev->name);
1267 if (cp->val != 0x00 && cp->val != 0x01)
1268 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1269 MGMT_STATUS_INVALID_PARAMS);
1273 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1274 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1279 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1280 cancel_delayed_work(&hdev->power_off);
1283 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1285 err = mgmt_powered(hdev, 1);
1290 if (!!cp->val == hdev_is_powered(hdev)) {
1291 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1295 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1302 queue_work(hdev->req_workqueue, &hdev->power_on);
1305 /* Disconnect connections, stop scans, etc */
1306 err = clean_up_hci_state(hdev);
1308 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1309 HCI_POWER_OFF_TIMEOUT);
1311 /* ENODATA means there were no HCI commands queued */
1312 if (err == -ENODATA) {
1313 cancel_delayed_work(&hdev->power_off);
1314 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1320 hci_dev_unlock(hdev);
1324 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1325 struct sock *skip_sk)
1327 struct sk_buff *skb;
1328 struct mgmt_hdr *hdr;
1330 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1334 hdr = (void *) skb_put(skb, sizeof(*hdr));
1335 hdr->opcode = cpu_to_le16(event);
1337 hdr->index = cpu_to_le16(hdev->id);
1339 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1340 hdr->len = cpu_to_le16(data_len);
1343 memcpy(skb_put(skb, data_len), data, data_len);
1346 __net_timestamp(skb);
1348 hci_send_to_control(skb, skip_sk);
1354 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1358 ev = cpu_to_le32(get_current_settings(hdev));
1360 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1365 struct hci_dev *hdev;
1369 static void settings_rsp(struct pending_cmd *cmd, void *data)
1371 struct cmd_lookup *match = data;
1373 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1375 list_del(&cmd->list);
1377 if (match->sk == NULL) {
1378 match->sk = cmd->sk;
1379 sock_hold(match->sk);
1382 mgmt_pending_free(cmd);
1385 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1389 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1390 mgmt_pending_remove(cmd);
1393 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1395 if (!lmp_bredr_capable(hdev))
1396 return MGMT_STATUS_NOT_SUPPORTED;
1397 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1398 return MGMT_STATUS_REJECTED;
1400 return MGMT_STATUS_SUCCESS;
1403 static u8 mgmt_le_support(struct hci_dev *hdev)
1405 if (!lmp_le_capable(hdev))
1406 return MGMT_STATUS_NOT_SUPPORTED;
1407 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1408 return MGMT_STATUS_REJECTED;
1410 return MGMT_STATUS_SUCCESS;
1413 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1415 struct pending_cmd *cmd;
1416 struct mgmt_mode *cp;
1417 struct hci_request req;
1420 BT_DBG("status 0x%02x", status);
1424 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1429 u8 mgmt_err = mgmt_status(status);
1430 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1431 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1437 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1440 if (hdev->discov_timeout > 0) {
1441 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1442 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1446 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1450 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1453 new_settings(hdev, cmd->sk);
1455 /* When the discoverable mode gets changed, make sure
1456 * that class of device has the limited discoverable
1457 * bit correctly set.
1459 hci_req_init(&req, hdev);
1461 hci_req_run(&req, NULL);
1464 mgmt_pending_remove(cmd);
1467 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. cp->val selects the mode: 0x00 off,
 * 0x01 general discoverable, 0x02 limited discoverable; cp->timeout is a
 * little-endian duration in seconds. Validates the val/timeout combination
 * (off requires no timeout, limited requires one), handles the powered-off
 * case by just toggling the dev_flag, short-circuits when only the timeout
 * changes, and otherwise queues the HCI commands (IAC LAP, scan enable,
 * and/or advertising data for LE-only controllers) with
 * set_discoverable_complete() as the completion callback.
 * Returns 0 or a negative errno from the reply helpers.
 * NOTE(review): lines are missing from this extract (locking, several
 * goto labels/branches, and some variable declarations are not visible).
 */
1470 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1473 struct mgmt_cp_set_discoverable *cp = data;
1474 struct pending_cmd *cmd;
1475 struct hci_request req;
1480 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1482 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1483 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1484 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1485 MGMT_STATUS_REJECTED);
1487 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1488 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1489 MGMT_STATUS_INVALID_PARAMS);
1491 timeout = __le16_to_cpu(cp->timeout);
1493 /* Disabling discoverable requires that no timeout is set,
1494 * and enabling limited discoverable requires a timeout.
1496 if ((cp->val == 0x00 && timeout > 0) ||
1497 (cp->val == 0x02 && timeout == 0))
1498 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1499 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense while powered; reject otherwise. */
1503 if (!hdev_is_powered(hdev) && timeout > 0) {
1504 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1505 MGMT_STATUS_NOT_POWERED);
/* Serialize against concurrent discoverable/connectable changes. */
1509 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1510 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1511 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject if not connectable. */
1516 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1517 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1518 MGMT_STATUS_REJECTED);
/* Powered off: only the setting flag changes, no HCI traffic needed. */
1522 if (!hdev_is_powered(hdev)) {
1523 bool changed = false;
1525 /* Setting limited discoverable when powered off is
1526 * not a valid operation since it requires a timeout
1527 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1529 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1530 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1534 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 err = new_settings(hdev, sk);
1544 /* If the current mode is the same, then just update the timeout
1545 * value with the new value. And if only the timeout gets updated,
1546 * then no need for any HCI transactions.
1548 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1549 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1550 &hdev->dev_flags)) {
1551 cancel_delayed_work(&hdev->discov_off);
1552 hdev->discov_timeout = timeout;
1554 if (cp->val && hdev->discov_timeout > 0) {
1555 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1556 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1560 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1564 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1570 /* Cancel any potential discoverable timeout that might be
1571 * still active and store new timeout value. The arming of
1572 * the timeout happens in the complete handler.
1574 cancel_delayed_work(&hdev->discov_off);
1575 hdev->discov_timeout = timeout;
1577 /* Limited discoverable mode */
1578 if (cp->val == 0x02)
1579 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1581 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1583 hci_req_init(&req, hdev);
1585 /* The procedure for LE-only controllers is much simpler - just
1586 * update the advertising data.
1588 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1594 struct hci_cp_write_current_iac_lap hci_cp;
1596 if (cp->val == 0x02) {
1597 /* Limited discoverable mode */
1598 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC LAP 0x9e8b00 and GIAC LAP 0x9e8b33, little-endian byte order. */
1599 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1600 hci_cp.iac_lap[1] = 0x8b;
1601 hci_cp.iac_lap[2] = 0x9e;
1602 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1603 hci_cp.iac_lap[4] = 0x8b;
1604 hci_cp.iac_lap[5] = 0x9e;
1606 /* General discoverable mode */
1608 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1609 hci_cp.iac_lap[1] = 0x8b;
1610 hci_cp.iac_lap[2] = 0x9e;
1613 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1614 (hci_cp.num_iac * 3) + 1, &hci_cp);
1616 scan |= SCAN_INQUIRY;
1618 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1621 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1624 update_adv_data(&req);
1626 err = hci_req_run(&req, set_discoverable_complete);
1628 mgmt_pending_remove(cmd);
1631 hci_dev_unlock(hdev);
/* Queue page-scan parameter changes implementing "fast connectable".
 * enable=true selects interlaced scanning with a 160 ms interval;
 * enable=false restores standard scanning with the default 1.28 s
 * interval. The window is 11.25 ms (0x0012) in both cases. Commands are
 * only added when the values actually differ from the current hdev state.
 * No-op for controllers without BR/EDR enabled or older than BT 1.2
 * (which lack Write Page Scan Type).
 */
1635 static void write_fast_connectable(struct hci_request *req, bool enable)
1637 struct hci_dev *hdev = req->hdev;
1638 struct hci_cp_write_page_scan_activity acp;
1641 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1644 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1648 type = PAGE_SCAN_TYPE_INTERLACED;
1650 /* 160 msec page scan interval */
1651 acp.interval = cpu_to_le16(0x0100);
1653 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1655 /* default 1.28 sec page scan */
1656 acp.interval = cpu_to_le16(0x0800);
/* 11.25 msec page scan window */
1659 acp.window = cpu_to_le16(0x0012);
/* Skip redundant HCI commands when parameters are already current. */
1661 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1662 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1663 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1666 if (hdev->page_scan_type != type)
1667 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for MGMT_OP_SET_CONNECTABLE. On error,
 * report the mapped status to the waiting socket; on success, toggle
 * HCI_CONNECTABLE to match the request, reply with the new settings and
 * emit New Settings if the flag actually changed.
 * NOTE(review): lines are missing from this extract (locking and the
 * branch structure around 'changed' are not visible).
 */
1670 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1672 struct pending_cmd *cmd;
1673 struct mgmt_mode *cp;
1676 BT_DBG("status 0x%02x", status);
1680 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1685 u8 mgmt_err = mgmt_status(status);
1686 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1692 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1694 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1696 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 new_settings(hdev, cmd->sk);
1702 mgmt_pending_remove(cmd);
1705 hci_dev_unlock(hdev);
/* Settings-only path for Set Connectable (used when no HCI traffic is
 * needed, e.g. powered off). Toggles HCI_CONNECTABLE to match 'val';
 * disabling connectable also clears HCI_DISCOVERABLE. Replies with the
 * current settings and, when something changed, emits New Settings.
 * Returns 0 or a negative errno.
 */
1708 static int set_connectable_update_settings(struct hci_dev *hdev,
1709 struct sock *sk, u8 val)
1711 bool changed = false;
1714 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1718 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1720 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable. */
1721 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1724 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1729 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. Validates that BR/EDR or LE is
 * enabled and that cp->val is boolean, delegates to
 * set_connectable_update_settings() when powered off, rejects when a
 * conflicting discoverable/connectable command is pending, and otherwise
 * builds an HCI request (scan enable and/or advertising updates) completed
 * by set_connectable_complete(). An -ENODATA from hci_req_run() means no
 * HCI command was needed, so the settings-only path is used instead.
 * NOTE(review): lines are missing from this extract (locking, the 'scan'
 * variable handling and several goto labels are not visible).
 */
1734 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1737 struct mgmt_mode *cp = data;
1738 struct pending_cmd *cmd;
1739 struct hci_request req;
1743 BT_DBG("request for %s", hdev->name);
1745 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1746 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1747 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1748 MGMT_STATUS_REJECTED);
1750 if (cp->val != 0x00 && cp->val != 0x01)
1751 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1752 MGMT_STATUS_INVALID_PARAMS);
1756 if (!hdev_is_powered(hdev)) {
1757 err = set_connectable_update_settings(hdev, sk, cp->val);
1761 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1762 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1763 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1768 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1774 hci_req_init(&req, hdev);
1776 /* If BR/EDR is not enabled and we disable advertising as a
1777 * by-product of disabling connectable, we need to update the
1778 * advertising flags.
1780 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1782 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1783 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1785 update_adv_data(&req);
1786 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Turning off page scan: stop any pending discoverable timeout. */
1792 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1793 hdev->discov_timeout > 0)
1794 cancel_delayed_work(&hdev->discov_off);
1797 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1800 /* If we're going from non-connectable to connectable or
1801 * vice-versa when fast connectable is enabled ensure that fast
1802 * connectable gets disabled. write_fast_connectable won't do
1803 * anything if the page scan parameters are already what they
1806 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1807 write_fast_connectable(&req, false);
/* Restart advertising so its connectable mode reflects the change,
 * but only when no LE link would be disturbed by doing so.
 */
1809 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1810 hci_conn_num(hdev, LE_LINK) == 0) {
1811 disable_advertising(&req);
1812 enable_advertising(&req);
1815 err = hci_req_run(&req, set_connectable_complete);
1817 mgmt_pending_remove(cmd);
1818 if (err == -ENODATA)
1819 err = set_connectable_update_settings(hdev, sk,
1825 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler. Pure host-side setting: validates that
 * cp->val is boolean, toggles HCI_PAIRABLE in dev_flags, replies with the
 * current settings and emits New Settings when the flag changed. No HCI
 * commands are involved. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and the
 * 'changed' branch structure are not visible).
 */
1829 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1832 struct mgmt_mode *cp = data;
1836 BT_DBG("request for %s", hdev->name);
1838 if (cp->val != 0x00 && cp->val != 0x01)
1839 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1840 MGMT_STATUS_INVALID_PARAMS);
1845 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1847 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1849 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1854 err = new_settings(hdev, sk);
1857 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. Requires BR/EDR support and a
 * boolean cp->val. Powered off: just toggles HCI_LINK_SECURITY and
 * notifies. Powered on: rejects if another instance is pending,
 * short-circuits when the HCI_AUTH flag already matches, otherwise queues
 * the command as pending and sends HCI Write Authentication Enable.
 * Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, 'val'
 * assignment and goto labels are not visible).
 */
1861 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1864 struct mgmt_mode *cp = data;
1865 struct pending_cmd *cmd;
1869 BT_DBG("request for %s", hdev->name);
1871 status = mgmt_bredr_support(hdev);
1873 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1876 if (cp->val != 0x00 && cp->val != 0x01)
1877 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1878 MGMT_STATUS_INVALID_PARAMS);
1882 if (!hdev_is_powered(hdev)) {
1883 bool changed = false;
1885 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1886 &hdev->dev_flags)) {
1887 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1891 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1896 err = new_settings(hdev, sk);
1901 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1902 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller state already matches; reply without HCI traffic. */
1909 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1910 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1914 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1920 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1922 mgmt_pending_remove(cmd);
1927 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler. Requires BR/EDR plus SSP-capable LMP features
 * and a boolean cp->val. Powered off: toggles HCI_SSP_ENABLED (disabling
 * SSP also clears HCI_HS_ENABLED, since HS depends on SSP) and notifies.
 * Powered on: rejects if an SSP or HS change is already pending,
 * short-circuits when the flag already matches, otherwise queues the
 * pending command and sends HCI Write Simple Pairing Mode (first turning
 * off SSP debug mode when disabling SSP with debug keys in use).
 * Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and goto
 * labels are not visible).
 */
1931 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1933 struct mgmt_mode *cp = data;
1934 struct pending_cmd *cmd;
1938 BT_DBG("request for %s", hdev->name);
1940 status = mgmt_bredr_support(hdev);
1942 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1944 if (!lmp_ssp_capable(hdev))
1945 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1946 MGMT_STATUS_NOT_SUPPORTED);
1948 if (cp->val != 0x00 && cp->val != 0x01)
1949 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1950 MGMT_STATUS_INVALID_PARAMS);
1954 if (!hdev_is_powered(hdev)) {
1958 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1961 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed requires SSP; drop it together with SSP. */
1964 changed = test_and_clear_bit(HCI_HS_ENABLED,
1967 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1970 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1975 err = new_settings(hdev, sk);
1980 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1981 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1982 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1987 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1988 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1992 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1998 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
1999 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2000 sizeof(cp->val), &cp->val);
2002 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2004 mgmt_pending_remove(cmd);
2009 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler. Requires BR/EDR support,
 * SSP-capable hardware, SSP currently enabled, and a boolean cp->val.
 * Purely a host-side flag: toggles HCI_HS_ENABLED, replies with settings
 * and emits New Settings on change. Disabling while powered is rejected.
 * Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and branch
 * structure are not visible).
 */
2013 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2015 struct mgmt_mode *cp = data;
2020 BT_DBG("request for %s", hdev->name);
2022 status = mgmt_bredr_support(hdev);
2024 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2026 if (!lmp_ssp_capable(hdev))
2027 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2028 MGMT_STATUS_NOT_SUPPORTED);
2030 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2031 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2032 MGMT_STATUS_REJECTED);
2034 if (cp->val != 0x00 && cp->val != 0x01)
2035 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2036 MGMT_STATUS_INVALID_PARAMS);
2041 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2043 if (hdev_is_powered(hdev)) {
2044 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2045 MGMT_STATUS_REJECTED);
2049 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2052 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2057 err = new_settings(hdev, sk);
2060 hci_dev_unlock(hdev);
/* HCI request completion handler for MGMT_OP_SET_LE. On error, send a
 * command-status to every pending SET_LE command. On success, reply with
 * the new settings to all pending commands and broadcast New Settings.
 * If LE ended up enabled, refresh the advertising and scan response data
 * so the controller starts with sane defaults (power-on is handled
 * separately in powered_update_hci()).
 * NOTE(review): lines are missing from this extract (locking and the
 * error/success branch structure are not visible).
 */
2064 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2066 struct cmd_lookup match = { NULL, hdev };
2069 u8 mgmt_err = mgmt_status(status);
2071 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2076 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2078 new_settings(hdev, match.sk);
2083 /* Make sure the controller has a good default for
2084 * advertising data. Restrict the update to when LE
2085 * has actually been enabled. During power on, the
2086 * update in powered_update_hci will take care of it.
2088 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2089 struct hci_request req;
2093 hci_req_init(&req, hdev);
2094 update_adv_data(&req);
2095 update_scan_rsp_data(&req);
2096 hci_req_run(&req, NULL);
2098 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. Requires LE-capable hardware, a boolean
 * cp->val, and a dual-mode controller (LE-only devices cannot toggle LE
 * off). When powered off or when the host-LE state already matches, only
 * the HCI_LE_ENABLED flag (and HCI_ADVERTISING when disabling) changes.
 * Otherwise a pending command is queued and HCI Write LE Host Supported
 * is sent, preceded by disabling advertising when turning LE off;
 * le_enable_complete() finishes the operation.
 * Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, 'val'/'le'
 * field assignments and goto labels are not visible).
 */
2102 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2104 struct mgmt_mode *cp = data;
2105 struct hci_cp_write_le_host_supported hci_cp;
2106 struct pending_cmd *cmd;
2107 struct hci_request req;
2111 BT_DBG("request for %s", hdev->name);
2113 if (!lmp_le_capable(hdev))
2114 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2115 MGMT_STATUS_NOT_SUPPORTED);
2117 if (cp->val != 0x00 && cp->val != 0x01)
2118 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2119 MGMT_STATUS_INVALID_PARAMS);
2121 /* LE-only devices do not allow toggling LE on/off */
2122 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2123 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2124 MGMT_STATUS_REJECTED);
2129 enabled = lmp_host_le_capable(hdev);
2131 if (!hdev_is_powered(hdev) || val == enabled) {
2132 bool changed = false;
2134 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2135 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Disabling LE implicitly stops LE advertising as well. */
2139 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2140 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2144 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2149 err = new_settings(hdev, sk);
2154 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2155 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2156 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2161 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2167 hci_req_init(&req, hdev);
2169 memset(&hci_cp, 0, sizeof(hci_cp));
2173 hci_cp.simul = lmp_le_br_capable(hdev);
2175 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2176 disable_advertising(&req);
2179 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2182 err = hci_req_run(&req, le_enable_complete);
2184 mgmt_pending_remove(cmd);
2187 hci_dev_unlock(hdev);
2191 /* This is a helper function to test for pending mgmt commands that can
2192 * cause CoD or EIR HCI commands. We can only allow one such pending
2193 * mgmt command at a time since otherwise we cannot easily track what
2194 * the current values are, will be, and based on that calculate if a new
2195 * HCI command needs to be sent and if yes with what value.
/* Returns true when any pending command in hdev->mgmt_pending is one of
 * the CoD/EIR-affecting opcodes listed below.
 * NOTE(review): the 'return true'/'return false' lines are missing from
 * this extract.
 */
2197 static bool pending_eir_or_class(struct hci_dev *hdev)
2199 struct pending_cmd *cmd;
2201 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2202 switch (cmd->opcode) {
2203 case MGMT_OP_ADD_UUID:
2204 case MGMT_OP_REMOVE_UUID:
2205 case MGMT_OP_SET_DEV_CLASS:
2206 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order, used by get_uuid_size() to detect 16/32-bit
 * short-form UUIDs.
 */
2214 static const u8 bluetooth_base_uuid[] = {
2215 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2216 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine the short-form size of a 128-bit little-endian UUID by
 * comparing its low 12 bytes against the Bluetooth Base UUID and then
 * inspecting the remaining 32-bit value.
 * NOTE(review): the return statements (presumably 128/32/16) are missing
 * from this extract.
 */
2219 static u8 get_uuid_size(const u8 *uuid)
2223 if (memcmp(uuid, bluetooth_base_uuid, 12))
2226 val = get_unaligned_le32(&uuid[12]);
/* Common completion helper for the CoD-changing operations (Add UUID,
 * Remove UUID, Set Device Class): reply to the pending command identified
 * by 'mgmt_op' with the mapped status and the current 3-byte device
 * class, then remove it from the pending list.
 */
2233 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2235 struct pending_cmd *cmd;
2239 cmd = mgmt_pending_find(mgmt_op, hdev);
2243 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2244 hdev->dev_class, 3);
2246 mgmt_pending_remove(cmd);
2249 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_ADD_UUID; delegates to the
 * shared class-complete helper.
 */
2252 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2254 BT_DBG("status 0x%02x", status);
2256 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler. Rejects while another CoD/EIR-affecting
 * command is pending, allocates a bt_uuid entry (copying the 128-bit
 * UUID, service hint, and short-form size) onto hdev->uuids, then runs an
 * HCI request to refresh class/EIR. -ENODATA from hci_req_run() means no
 * update was needed and the command completes immediately with the
 * current device class; otherwise a pending command is queued for
 * add_uuid_complete(). Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, the
 * allocation-failure path and the update_class/update_eir calls are not
 * visible).
 */
2259 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2261 struct mgmt_cp_add_uuid *cp = data;
2262 struct pending_cmd *cmd;
2263 struct hci_request req;
2264 struct bt_uuid *uuid;
2267 BT_DBG("request for %s", hdev->name);
2271 if (pending_eir_or_class(hdev)) {
2272 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2277 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2283 memcpy(uuid->uuid, cp->uuid, 16);
2284 uuid->svc_hint = cp->svc_hint;
2285 uuid->size = get_uuid_size(cp->uuid);
2287 list_add_tail(&uuid->list, &hdev->uuids);
2289 hci_req_init(&req, hdev);
2294 err = hci_req_run(&req, add_uuid_complete);
2296 if (err != -ENODATA)
/* Nothing to send: complete synchronously with the current CoD. */
2299 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2300 hdev->dev_class, 3);
2304 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2313 hci_dev_unlock(hdev);
/* Arm the service cache: if the device is powered and the cache was not
 * already active, set HCI_SERVICE_CACHE and schedule the delayed
 * service_cache work. The return value tells the caller whether the
 * cache path was taken.
 * NOTE(review): the return statements and the delay constant are missing
 * from this extract.
 */
2317 static bool enable_service_cache(struct hci_dev *hdev)
2319 if (!hdev_is_powered(hdev))
2322 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2323 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion callback for MGMT_OP_REMOVE_UUID; delegates to
 * the shared class-complete helper.
 */
2331 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2333 BT_DBG("status 0x%02x", status);
2335 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (possibly via the service-cache fast path); otherwise every matching
 * entry is unlinked and the command fails with INVALID_PARAMS when none
 * matched. Like add_uuid(), the CoD/EIR refresh runs as an HCI request
 * completed by remove_uuid_complete(), with -ENODATA short-circuiting to
 * an immediate reply. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, the 'found'
 * counter, kfree of removed entries, and goto labels are not visible).
 */
2338 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2341 struct mgmt_cp_remove_uuid *cp = data;
2342 struct pending_cmd *cmd;
2343 struct bt_uuid *match, *tmp;
2344 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2345 struct hci_request req;
2348 BT_DBG("request for %s", hdev->name);
2352 if (pending_eir_or_class(hdev)) {
2353 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID is the wildcard: drop every stored UUID. */
2358 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2359 hci_uuids_clear(hdev);
2361 if (enable_service_cache(hdev)) {
2362 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2363 0, hdev->dev_class, 3);
2372 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2373 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2376 list_del(&match->list);
2382 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2383 MGMT_STATUS_INVALID_PARAMS);
2388 hci_req_init(&req, hdev);
2393 err = hci_req_run(&req, remove_uuid_complete);
2395 if (err != -ENODATA)
2398 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2399 hdev->dev_class, 3);
2403 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2412 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS; delegates to
 * the shared class-complete helper.
 */
2416 static void set_class_complete(struct hci_dev *hdev, u8 status)
2418 BT_DBG("status 0x%02x", status);
2420 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler. Requires BR/EDR hardware, no conflicting
 * pending CoD/EIR command, and a valid major/minor pair (minor's two low
 * bits and major's three high bits are format/reserved bits and must be
 * zero). Stores the new class; powered off, that is a complete reply.
 * Otherwise runs an HCI request (flushing the service cache first if it
 * was armed) completed by set_class_complete(), again with -ENODATA
 * meaning "no update needed, reply now". Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking re-acquire
 * after cancel_delayed_work_sync, update_class/update_eir calls, and
 * goto labels are not visible).
 */
2423 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2426 struct mgmt_cp_set_dev_class *cp = data;
2427 struct pending_cmd *cmd;
2428 struct hci_request req;
2431 BT_DBG("request for %s", hdev->name);
2433 if (!lmp_bredr_capable(hdev))
2434 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2435 MGMT_STATUS_NOT_SUPPORTED);
2439 if (pending_eir_or_class(hdev)) {
2440 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2445 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2446 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2447 MGMT_STATUS_INVALID_PARAMS);
2451 hdev->major_class = cp->major;
2452 hdev->minor_class = cp->minor;
2454 if (!hdev_is_powered(hdev)) {
2455 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2456 hdev->dev_class, 3);
2460 hci_req_init(&req, hdev);
/* Drop the lock around the synchronous cancel to avoid deadlocking
 * with the service_cache work, which takes hci_dev_lock itself.
 */
2462 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2463 hci_dev_unlock(hdev);
2464 cancel_delayed_work_sync(&hdev->service_cache);
2471 err = hci_req_run(&req, set_class_complete);
2473 if (err != -ENODATA)
2476 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2477 hdev->dev_class, 3);
2481 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2490 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler. Validates that the controller has
 * BR/EDR, that key_count cannot overflow the u16 length computation
 * (max_key_count bound), that the payload length matches key_count
 * exactly, that debug_keys is boolean, and that every key has a BR/EDR
 * address and a known type. Then clears the existing link-key store,
 * updates HCI_KEEP_DEBUG_KEYS to match debug_keys, and adds each key —
 * skipping debug-combination keys, which are never loaded from userspace.
 * Always replies with command-complete. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and the
 * 'changed' branch structure are not visible).
 */
2494 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2497 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound keeping sizeof(*cp) + count * entry_size within u16. */
2498 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2499 sizeof(struct mgmt_link_key_info));
2500 u16 key_count, expected_len;
2504 BT_DBG("request for %s", hdev->name);
2506 if (!lmp_bredr_capable(hdev))
2507 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2508 MGMT_STATUS_NOT_SUPPORTED);
2510 key_count = __le16_to_cpu(cp->key_count);
2511 if (key_count > max_key_count) {
2512 BT_ERR("load_link_keys: too big key_count value %u",
2514 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2515 MGMT_STATUS_INVALID_PARAMS);
2518 expected_len = sizeof(*cp) + key_count *
2519 sizeof(struct mgmt_link_key_info);
2520 if (expected_len != len) {
2521 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2523 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2524 MGMT_STATUS_INVALID_PARAMS);
2527 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2528 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2529 MGMT_STATUS_INVALID_PARAMS);
2531 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before touching the key store. */
2534 for (i = 0; i < key_count; i++) {
2535 struct mgmt_link_key_info *key = &cp->keys[i];
2537 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2538 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2539 MGMT_STATUS_INVALID_PARAMS);
2544 hci_link_keys_clear(hdev);
2547 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2550 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2554 new_settings(hdev, NULL);
2556 for (i = 0; i < key_count; i++) {
2557 struct mgmt_link_key_info *key = &cp->keys[i];
2559 /* Always ignore debug keys and require a new pairing if
2560 * the user wants to use them.
2562 if (key->type == HCI_LK_DEBUG_COMBINATION)
2565 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2566 key->type, key->pin_len, NULL);
2569 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2571 hci_dev_unlock(hdev);
/* Emit the MGMT_EV_DEVICE_UNPAIRED event for the given address, skipping
 * delivery to 'skip_sk' (typically the socket that issued the unpair).
 * Returns the result of mgmt_event().
 */
2576 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2577 u8 addr_type, struct sock *skip_sk)
2579 struct mgmt_ev_device_unpaired ev;
2581 bacpy(&ev.addr.bdaddr, bdaddr);
2582 ev.addr.type = addr_type;
2584 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler. Validates address type and the boolean
 * disconnect flag, requires power, then removes stored keys: the BR/EDR
 * link key for BDADDR_BREDR, or the IRK, connection parameters and LTK
 * for LE addresses. NOT_PAIRED is returned when nothing was stored. When
 * cp->disconnect is set and a live connection exists, HCI Disconnect
 * (reason 0x13, remote user terminated) is sent and the reply is deferred
 * to a pending command; otherwise the command completes immediately and
 * Device Unpaired is broadcast. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, 'addr_type'
 * declaration, goto labels and the conn==NULL path are not visible).
 */
2588 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2591 struct mgmt_cp_unpair_device *cp = data;
2592 struct mgmt_rp_unpair_device rp;
2593 struct hci_cp_disconnect dc;
2594 struct pending_cmd *cmd;
2595 struct hci_conn *conn;
2598 memset(&rp, 0, sizeof(rp));
2599 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2600 rp.addr.type = cp->addr.type;
2602 if (!bdaddr_type_is_valid(cp->addr.type))
2603 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2604 MGMT_STATUS_INVALID_PARAMS,
2607 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2608 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2609 MGMT_STATUS_INVALID_PARAMS,
2614 if (!hdev_is_powered(hdev)) {
2615 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2616 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2620 if (cp->addr.type == BDADDR_BREDR) {
2621 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: translate mgmt address type to the HCI LE address type. */
2625 if (cp->addr.type == BDADDR_LE_PUBLIC)
2626 addr_type = ADDR_LE_DEV_PUBLIC;
2628 addr_type = ADDR_LE_DEV_RANDOM;
2630 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2632 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2634 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2638 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2639 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2643 if (cp->disconnect) {
2644 if (cp->addr.type == BDADDR_BREDR)
2645 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2648 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2655 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2657 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2661 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2668 dc.handle = cpu_to_le16(conn->handle);
2669 dc.reason = 0x13; /* Remote User Terminated Connection */
2670 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2672 mgmt_pending_remove(cmd);
2675 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler. Validates the address type, requires the
 * adapter to be up, and rejects if another disconnect is already pending.
 * Looks up the live ACL (BR/EDR) or LE connection; NOT_CONNECTED is
 * returned when none exists or it is not established. Otherwise queues a
 * pending command and sends HCI Disconnect with reason "remote user
 * terminated"; the reply is sent when the disconnection completes.
 * Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and goto
 * labels are not visible).
 */
2679 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2682 struct mgmt_cp_disconnect *cp = data;
2683 struct mgmt_rp_disconnect rp;
2684 struct hci_cp_disconnect dc;
2685 struct pending_cmd *cmd;
2686 struct hci_conn *conn;
2691 memset(&rp, 0, sizeof(rp));
2692 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2693 rp.addr.type = cp->addr.type;
2695 if (!bdaddr_type_is_valid(cp->addr.type))
2696 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2697 MGMT_STATUS_INVALID_PARAMS,
2702 if (!test_bit(HCI_UP, &hdev->flags)) {
2703 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2704 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2708 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2710 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2714 if (cp->addr.type == BDADDR_BREDR)
2715 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2718 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2720 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2721 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2722 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2726 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2732 dc.handle = cpu_to_le16(conn->handle);
2733 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2735 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2737 mgmt_pending_remove(cmd);
2740 hci_dev_unlock(hdev);
/* Translate an HCI link type plus LE address type into the mgmt BDADDR_*
 * address type used on the management interface: LE links map public vs
 * random addresses, everything else falls back to BDADDR_BREDR.
 * NOTE(review): the 'case LE_LINK:' label and default cases are missing
 * from this extract.
 */
2744 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2746 switch (link_type) {
2748 switch (addr_type) {
2749 case ADDR_LE_DEV_PUBLIC:
2750 return BDADDR_LE_PUBLIC;
2753 /* Fallback to LE Random address type */
2754 return BDADDR_LE_RANDOM;
2758 /* Fallback to BR/EDR type */
2759 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler. Requires power. First pass counts
 * connections that are mgmt-visible (HCI_CONN_MGMT_CONNECTED) to size the
 * reply buffer; second pass fills in addresses, skipping SCO/eSCO links.
 * The reply length is recomputed afterwards since the second pass may
 * have filtered entries. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking, the
 * kmalloc-failure path, kfree of 'rp', and the loop increments are not
 * visible).
 */
2763 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2766 struct mgmt_rp_get_connections *rp;
2776 if (!hdev_is_powered(hdev)) {
2777 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2778 MGMT_STATUS_NOT_POWERED);
2783 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2784 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2788 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2789 rp = kmalloc(rp_len, GFP_KERNEL);
2796 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2797 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2799 bacpy(&rp->addr[i].bdaddr, &c->dst);
2800 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* SCO/eSCO audio links are not reported over mgmt. */
2801 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2806 rp->conn_count = cpu_to_le16(i);
2808 /* Recalculate length in case of filtered SCO connections, etc */
2809 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2811 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2817 hci_dev_unlock(hdev);
/* Queue a pending MGMT_OP_PIN_CODE_NEG_REPLY command and send the HCI
 * PIN Code Negative Reply for the given address. The pending entry is
 * removed again if the HCI send fails. Returns 0 or a negative errno.
 * NOTE(review): the mgmt_pending_add failure check is missing from this
 * extract.
 */
2821 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2822 struct mgmt_cp_pin_code_neg_reply *cp)
2824 struct pending_cmd *cmd;
2827 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2832 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2833 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2835 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler. Requires power and a live ACL
 * connection to the address. If the pending security level demands a
 * 16-digit PIN but the supplied one is shorter, the reply is converted
 * into a negative reply toward the controller and INVALID_PARAMS toward
 * userspace. Otherwise queues the pending command and sends HCI PIN Code
 * Reply with the address, length and code. Returns 0 or a negative errno.
 * NOTE(review): lines are missing from this extract (locking and goto
 * labels are not visible).
 */
2840 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2843 struct hci_conn *conn;
2844 struct mgmt_cp_pin_code_reply *cp = data;
2845 struct hci_cp_pin_code_reply reply;
2846 struct pending_cmd *cmd;
2853 if (!hdev_is_powered(hdev)) {
2854 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2855 MGMT_STATUS_NOT_POWERED);
2859 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2861 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2862 MGMT_STATUS_NOT_CONNECTED);
2866 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2867 struct mgmt_cp_pin_code_neg_reply ncp;
2869 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2871 BT_ERR("PIN code is not 16 bytes long");
2873 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2875 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2876 MGMT_STATUS_INVALID_PARAMS);
2881 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2887 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2888 reply.pin_len = cp->pin_len;
2889 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2891 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2893 mgmt_pending_remove(cmd);
2896 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler. Validates that the requested IO
 * capability does not exceed KeyboardDisplay, then stores it in
 * hdev->io_capability for future pairing operations. Always replies with
 * command-complete (no event data). Returns 0 or a negative errno.
 */
2900 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2903 struct mgmt_cp_set_io_capability *cp = data;
2907 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2908 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2909 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2913 hdev->io_capability = cp->io_capability;
2915 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2916 hdev->io_capability);
2918 hci_dev_unlock(hdev);
2920 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points at
 * this connection, or NULL when the connection has no pairing in flight.
 * NOTE(review): the 'return cmd;' / 'return NULL;' lines are missing from
 * this extract.
 */
2924 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2926 struct hci_dev *hdev = conn->hdev;
2927 struct pending_cmd *cmd;
2929 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2930 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2933 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the waiting socket with the
 * peer address and the given status, detach all connection callbacks so
 * no further events reach this pairing, drop the connection reference
 * taken when pairing started, and remove the pending command.
 */
2942 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2944 struct mgmt_rp_pair_device rp;
2945 struct hci_conn *conn = cmd->user_data;
2947 bacpy(&rp.addr.bdaddr, &conn->dst);
2948 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2950 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2953 /* So we don't get further callbacks for this connection */
2954 conn->connect_cfm_cb = NULL;
2955 conn->security_cfm_cb = NULL;
2956 conn->disconn_cfm_cb = NULL;
2958 hci_conn_drop(conn);
2960 mgmt_pending_remove(cmd);
/* Called by the SMP layer when pairing over LE finishes. Maps the boolean
 * outcome to a mgmt status and completes the matching pending Pair Device
 * command, if any.
 */
2963 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2965 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2966 struct pending_cmd *cmd;
2968 cmd = find_pairing(conn);
2970 pairing_complete(cmd, status);
/* Connection callback used for BR/EDR pairing: translate the HCI status
 * and complete the pending Pair Device command associated with this
 * connection.
 */
2973 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2975 struct pending_cmd *cmd;
2977 BT_DBG("status %u", status);
2979 cmd = find_pairing(conn);
2981 BT_DBG("Unable to find a pending command");
2983 pairing_complete(cmd, mgmt_status(status));
/* Connection callback used for LE pairing. Same shape as
 * pairing_complete_cb().
 * NOTE(review): an early-return condition (visible in the line-number
 * gap after BT_DBG) is missing from this extract — presumably filtering
 * success, since SMP completion is reported via mgmt_smp_complete();
 * confirm against the full source.
 */
2986 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2988 struct pending_cmd *cmd;
2990 BT_DBG("status %u", status);
2995 cmd = find_pairing(conn);
2997 BT_DBG("Unable to find a pending command");
2999 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler. Validates the address type and IO
 * capability, requires power, then initiates a connection: ACL for
 * BR/EDR, or LE (after registering connection parameters so future
 * reconnections are faster). Connection errors map to BUSY or
 * CONNECT_FAILED; an in-progress pairing on the connection is BUSY.
 * On success a pending command is queued, the per-link-type pairing
 * callbacks are installed, and — if the link is already connected and
 * secure — the pairing completes immediately. Returns 0 or a negative
 * errno.
 * NOTE(review): lines are missing from this extract (locking, the
 * IS_ERR(conn) check, 'addr_type'/'status' declarations and goto labels
 * are not visible).
 */
3002 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3005 struct mgmt_cp_pair_device *cp = data;
3006 struct mgmt_rp_pair_device rp;
3007 struct pending_cmd *cmd;
3008 u8 sec_level, auth_type;
3009 struct hci_conn *conn;
3014 memset(&rp, 0, sizeof(rp));
3015 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3016 rp.addr.type = cp->addr.type;
3018 if (!bdaddr_type_is_valid(cp->addr.type))
3019 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3020 MGMT_STATUS_INVALID_PARAMS,
3023 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3024 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3025 MGMT_STATUS_INVALID_PARAMS,
3030 if (!hdev_is_powered(hdev)) {
3031 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3032 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3036 sec_level = BT_SECURITY_MEDIUM;
3037 auth_type = HCI_AT_DEDICATED_BONDING;
3039 if (cp->addr.type == BDADDR_BREDR) {
3040 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 /* Convert from L2CAP channel address type to HCI address type
3047 if (cp->addr.type == BDADDR_LE_PUBLIC)
3048 addr_type = ADDR_LE_DEV_PUBLIC;
3050 addr_type = ADDR_LE_DEV_RANDOM;
3052 /* When pairing a new device, it is expected to remember
3053 * this device for future connections. Adding the connection
3054 * parameter information ahead of time allows tracking
3055 * of the slave preferred values and will speed up any
3056 * further connection establishment.
3058 * If connection parameters already exist, then they
3059 * will be kept and this function does nothing.
3061 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3063 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3064 sec_level, auth_type);
3070 if (PTR_ERR(conn) == -EBUSY)
3071 status = MGMT_STATUS_BUSY;
3073 status = MGMT_STATUS_CONNECT_FAILED;
3075 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A non-NULL connect_cfm_cb means another pairing owns this link. */
3081 if (conn->connect_cfm_cb) {
3082 hci_conn_drop(conn);
3083 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3084 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3088 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3091 hci_conn_drop(conn);
3095 /* For LE, just connecting isn't a proof that the pairing finished */
3096 if (cp->addr.type == BDADDR_BREDR) {
3097 conn->connect_cfm_cb = pairing_complete_cb;
3098 conn->security_cfm_cb = pairing_complete_cb;
3099 conn->disconn_cfm_cb = pairing_complete_cb;
3101 conn->connect_cfm_cb = le_pairing_complete_cb;
3102 conn->security_cfm_cb = le_pairing_complete_cb;
3103 conn->disconn_cfm_cb = le_pairing_complete_cb;
3106 conn->io_capability = cp->io_cap;
3107 cmd->user_data = conn;
3109 if (conn->state == BT_CONNECTED &&
3110 hci_conn_security(conn, sec_level, auth_type))
3111 pairing_complete(cmd, 0);
3116 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: find the pending PAIR_DEVICE
 * command, verify the address matches, and abort it with
 * MGMT_STATUS_CANCELLED. NOTE(review): sampled listing — braces and
 * goto labels are not visible here.
 */
3120 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3123 struct mgmt_addr_info *addr = data;
3124 struct pending_cmd *cmd;
3125 struct hci_conn *conn;
3132 if (!hdev_is_powered(hdev)) {
3133 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3134 MGMT_STATUS_NOT_POWERED);
3138 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
/* No pairing in progress: nothing to cancel */
3140 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3141 MGMT_STATUS_INVALID_PARAMS);
3145 conn = cmd->user_data;
/* Address must match the connection being paired */
3147 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3148 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3149 MGMT_STATUS_INVALID_PARAMS);
3153 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3155 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3156 addr, sizeof(*addr));
3158 hci_dev_unlock(hdev);
/* Common helper for the user confirm/passkey (neg-)reply mgmt commands.
 * For LE addresses the reply is routed to SMP; for BR/EDR it is sent as
 * the given HCI command (with the passkey payload only for
 * HCI_OP_USER_PASSKEY_REPLY). NOTE(review): sampled listing — braces
 * and goto labels are not visible here.
 */
3162 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3163 struct mgmt_addr_info *addr, u16 mgmt_op,
3164 u16 hci_op, __le32 passkey)
3166 struct pending_cmd *cmd;
3167 struct hci_conn *conn;
3172 if (!hdev_is_powered(hdev)) {
3173 err = cmd_complete(sk, hdev->id, mgmt_op,
3174 MGMT_STATUS_NOT_POWERED, addr,
/* Look up the live connection on the matching transport */
3179 if (addr->type == BDADDR_BREDR)
3180 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3182 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3185 err = cmd_complete(sk, hdev->id, mgmt_op,
3186 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing replies go through SMP, not HCI */
3191 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3192 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3194 err = cmd_complete(sk, hdev->id, mgmt_op,
3195 MGMT_STATUS_SUCCESS, addr,
3198 err = cmd_complete(sk, hdev->id, mgmt_op,
3199 MGMT_STATUS_FAILED, addr,
3205 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3211 /* Continue with pairing via HCI */
3212 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3213 struct hci_cp_user_passkey_reply cp;
3215 bacpy(&cp.bdaddr, &addr->bdaddr);
3216 cp.passkey = passkey;
3217 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* Other replies only carry the bdaddr */
3219 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* presumably the hci_send_cmd() failure cleanup — TODO confirm */
3223 mgmt_pending_remove(cmd);
3226 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper around user_pairing_resp()
 * with the corresponding HCI negative-reply opcode (no passkey).
 */
3230 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3231 void *data, u16 len)
3233 struct mgmt_cp_pin_code_neg_reply *cp = data;
3237 return user_pairing_resp(sk, hdev, &cp->addr,
3238 MGMT_OP_PIN_CODE_NEG_REPLY,
3239 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size payload, then
 * delegate to user_pairing_resp() (no passkey for confirm replies).
 */
3242 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3245 struct mgmt_cp_user_confirm_reply *cp = data;
/* Payload must be exactly the command struct */
3249 if (len != sizeof(*cp))
3250 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3251 MGMT_STATUS_INVALID_PARAMS);
3253 return user_pairing_resp(sk, hdev, &cp->addr,
3254 MGMT_OP_USER_CONFIRM_REPLY,
3255 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user-confirmation request
 * via the common user_pairing_resp() helper.
 */
3258 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3259 void *data, u16 len)
3261 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3265 return user_pairing_resp(sk, hdev, &cp->addr,
3266 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3267 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: forward the user-entered passkey via
 * user_pairing_resp(); the only wrapper that passes a real passkey.
 */
3270 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3273 struct mgmt_cp_user_passkey_reply *cp = data;
3277 return user_pairing_resp(sk, hdev, &cp->addr,
3278 MGMT_OP_USER_PASSKEY_REPLY,
3279 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request via the
 * common user_pairing_resp() helper.
 */
3282 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3283 void *data, u16 len)
3285 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3289 return user_pairing_resp(sk, hdev, &cp->addr,
3290 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3291 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name onto
 * the given request (sent later by hci_req_run()).
 */
3294 static void update_name(struct hci_request *req)
3296 struct hci_dev *hdev = req->hdev;
3297 struct hci_cp_write_local_name cp;
3299 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3301 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for SET_LOCAL_NAME: answer the pending
 * mgmt command with either a status (on HCI failure) or a complete
 * event, then drop the pending entry. NOTE(review): sampled listing —
 * the cmd NULL check and status branch are not visible here.
 */
3304 static void set_name_complete(struct hci_dev *hdev, u8 status)
3306 struct mgmt_cp_set_local_name *cp;
3307 struct pending_cmd *cmd;
3309 BT_DBG("status 0x%02x", status);
3313 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3320 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3321 mgmt_status(status));
3323 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3326 mgmt_pending_remove(cmd);
3329 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changes or
 * the controller is off (update in-memory state and emit
 * LOCAL_NAME_CHANGED), otherwise queue the HCI name/EIR/scan-response
 * updates and complete asynchronously via set_name_complete().
 * NOTE(review): sampled listing — braces and goto labels not visible.
 */
3332 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3335 struct mgmt_cp_set_local_name *cp = data;
3336 struct pending_cmd *cmd;
3337 struct hci_request req;
3344 /* If the old values are the same as the new ones just return a
3345 * direct command complete event.
3347 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3348 !memcmp(hdev->short_name, cp->short_name,
3349 sizeof(hdev->short_name))) {
3350 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3355 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
/* Powered off: only update in-memory state and notify listeners */
3357 if (!hdev_is_powered(hdev)) {
3358 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3360 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3365 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3371 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3377 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3379 hci_req_init(&req, hdev);
/* BR/EDR needs the classic name/EIR update (calls not visible here) */
3381 if (lmp_bredr_capable(hdev)) {
3386 /* The name is stored in the scan response data and so
3387 * no need to udpate the advertising data here.
3389 if (lmp_le_capable(hdev))
3390 update_scan_rsp_data(&req);
3392 err = hci_req_run(&req, set_name_complete);
3394 mgmt_pending_remove(cmd);
3397 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller and no duplicate in-flight request; then issues the plain
 * or extended (Secure Connections) HCI read-OOB command.
 */
3401 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3402 void *data, u16 data_len)
3404 struct pending_cmd *cmd;
3407 BT_DBG("%s", hdev->name);
3411 if (!hdev_is_powered(hdev)) {
3412 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3413 MGMT_STATUS_NOT_POWERED);
3417 if (!lmp_ssp_capable(hdev)) {
3418 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3419 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding request at a time */
3423 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3424 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3429 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
/* SC-enabled controllers provide the extended (P-256) OOB data */
3435 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3436 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3439 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3442 mgmt_pending_remove(cmd);
3445 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: the payload length selects the
 * legacy (hash+randomizer) or extended (192+256 bit) variant; any other
 * length is rejected as invalid.
 */
3449 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3450 void *data, u16 len)
3454 BT_DBG("%s ", hdev->name);
3458 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3459 struct mgmt_cp_add_remote_oob_data *cp = data;
3462 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3463 cp->hash, cp->randomizer);
3465 status = MGMT_STATUS_FAILED;
3467 status = MGMT_STATUS_SUCCESS;
3469 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3470 status, &cp->addr, sizeof(cp->addr));
3471 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3472 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3475 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3481 status = MGMT_STATUS_FAILED;
3483 status = MGMT_STATUS_SUCCESS;
3485 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3486 status, &cp->addr, sizeof(cp->addr));
/* Neither known size: malformed command */
3488 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3489 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3490 MGMT_STATUS_INVALID_PARAMS);
3493 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for the
 * address; a lookup failure maps to INVALID_PARAMS.
 */
3497 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3498 void *data, u16 len)
3500 struct mgmt_cp_remove_remote_oob_data *cp = data;
3504 BT_DBG("%s", hdev->name);
3508 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3510 status = MGMT_STATUS_INVALID_PARAMS;
3512 status = MGMT_STATUS_SUCCESS;
3514 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3515 status, &cp->addr, sizeof(cp->addr));
3517 hci_dev_unlock(hdev);
/* Fail path for START_DISCOVERY: reset discovery state to STOPPED and
 * complete the pending command with the translated HCI status plus the
 * discovery type that was attempted.
 */
3521 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3523 struct pending_cmd *cmd;
3527 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3529 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3533 type = hdev->discovery.type;
3535 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3536 &type, sizeof(type));
3537 mgmt_pending_remove(cmd);
/* HCI request callback for START_DISCOVERY: on failure delegate to
 * mgmt_start_discovery_failed(); on success enter DISCOVERY_FINDING and
 * arm the LE-scan-disable timer for LE/interleaved discovery (BR/EDR
 * inquiry times out on its own, so timeout stays 0).
 */
3542 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3544 unsigned long timeout = 0;
3546 BT_DBG("status %d", status);
3550 mgmt_start_discovery_failed(hdev, status);
3551 hci_dev_unlock(hdev);
3556 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3557 hci_dev_unlock(hdev);
3559 switch (hdev->discovery.type) {
3560 case DISCOV_TYPE_LE:
3561 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3564 case DISCOV_TYPE_INTERLEAVED:
3565 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3568 case DISCOV_TYPE_BREDR:
3572 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Only schedule the disable work when an LE scan needs stopping */
3578 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_DISCOVERY handler: validate power/periodic-inquiry/
 * current-state preconditions, then build an HCI request per discovery
 * type — BR/EDR inquiry with the GIAC LAP, or an LE active scan (after
 * stopping any background scan and picking a private own-address) for
 * LE and interleaved discovery. NOTE(review): sampled listing — braces,
 * goto labels and several error paths are not visible here.
 */
3581 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3582 void *data, u16 len)
3584 struct mgmt_cp_start_discovery *cp = data;
3585 struct pending_cmd *cmd;
3586 struct hci_cp_le_set_scan_param param_cp;
3587 struct hci_cp_le_set_scan_enable enable_cp;
3588 struct hci_cp_inquiry inq_cp;
3589 struct hci_request req;
3590 /* General inquiry access code (GIAC) */
3591 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3592 u8 status, own_addr_type;
3595 BT_DBG("%s", hdev->name);
3599 if (!hdev_is_powered(hdev)) {
3600 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3601 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive */
3605 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3606 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3611 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3612 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3617 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3623 hdev->discovery.type = cp->type;
3625 hci_req_init(&req, hdev);
3627 switch (hdev->discovery.type) {
3628 case DISCOV_TYPE_BREDR:
3629 status = mgmt_bredr_support(hdev);
3631 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3633 mgmt_pending_remove(cmd);
/* Refuse if an inquiry is already running */
3637 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3638 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3640 mgmt_pending_remove(cmd);
3644 hci_inquiry_cache_flush(hdev);
3646 memset(&inq_cp, 0, sizeof(inq_cp));
3647 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3648 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3649 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3652 case DISCOV_TYPE_LE:
3653 case DISCOV_TYPE_INTERLEAVED:
3654 status = mgmt_le_support(hdev);
3656 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3658 mgmt_pending_remove(cmd);
/* Interleaved discovery additionally needs BR/EDR enabled */
3662 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3663 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3664 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3665 MGMT_STATUS_NOT_SUPPORTED);
3666 mgmt_pending_remove(cmd);
/* Cannot scan while advertising is active */
3670 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3671 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3672 MGMT_STATUS_REJECTED);
3673 mgmt_pending_remove(cmd);
3677 /* If controller is scanning, it means the background scanning
3678 * is running. Thus, we should temporarily stop it in order to
3679 * set the discovery scanning parameters.
3681 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3682 hci_req_add_le_scan_disable(&req);
3684 memset(&param_cp, 0, sizeof(param_cp));
3686 /* All active scans will be done with either a resolvable
3687 * private address (when privacy feature has been enabled)
3688 * or unresolvable private address.
3690 err = hci_update_random_address(&req, true, &own_addr_type);
3692 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3693 MGMT_STATUS_FAILED);
3694 mgmt_pending_remove(cmd);
3698 param_cp.type = LE_SCAN_ACTIVE;
3699 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3700 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3701 param_cp.own_address_type = own_addr_type;
3702 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3705 memset(&enable_cp, 0, sizeof(enable_cp));
3706 enable_cp.enable = LE_SCAN_ENABLE;
3707 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3708 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* presumably the default: case for unknown discovery types */
3713 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3714 MGMT_STATUS_INVALID_PARAMS);
3715 mgmt_pending_remove(cmd);
3719 err = hci_req_run(&req, start_discovery_complete);
3721 mgmt_pending_remove(cmd);
3723 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3726 hci_dev_unlock(hdev);
/* Fail path for STOP_DISCOVERY: complete the pending command with the
 * translated HCI status and the current discovery type.
 */
3730 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3732 struct pending_cmd *cmd;
3735 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3739 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3740 &hdev->discovery.type, sizeof(hdev->discovery.type));
3741 mgmt_pending_remove(cmd);
/* HCI request callback for STOP_DISCOVERY: report failure via
 * mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
3746 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3748 BT_DBG("status %d", status);
3753 mgmt_stop_discovery_failed(hdev, status);
3757 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3760 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when no discovery is active or
 * the requested type does not match; otherwise queue hci_stop_discovery()
 * and transition to STOPPING, completing immediately when no HCI
 * commands were needed (-ENODATA).
 */
3763 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3766 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3767 struct pending_cmd *cmd;
3768 struct hci_request req;
3771 BT_DBG("%s", hdev->name);
3775 if (!hci_discovery_active(hdev)) {
3776 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3777 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3778 sizeof(mgmt_cp->type));
3782 if (hdev->discovery.type != mgmt_cp->type) {
3783 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3784 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3785 sizeof(mgmt_cp->type));
3789 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3795 hci_req_init(&req, hdev);
3797 hci_stop_discovery(&req);
3799 err = hci_req_run(&req, stop_discovery_complete);
3801 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3805 mgmt_pending_remove(cmd);
3807 /* If no HCI commands were sent we're done */
3808 if (err == -ENODATA) {
3809 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3810 &mgmt_cp->type, sizeof(mgmt_cp->type));
3811 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3815 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during active discovery, mark a cached
 * inquiry entry's name as known or needed; NAME_NEEDED entries are
 * re-queued for name resolution.
 */
3819 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3822 struct mgmt_cp_confirm_name *cp = data;
3823 struct inquiry_entry *e;
3826 BT_DBG("%s", hdev->name);
3830 if (!hci_discovery_active(hdev)) {
3831 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3832 MGMT_STATUS_FAILED, &cp->addr,
/* Entry must exist in the unknown-name inquiry cache */
3837 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3839 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3840 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3845 if (cp->name_known) {
3846 e->name_state = NAME_KNOWN;
3849 e->name_state = NAME_NEEDED;
3850 hci_inquiry_cache_update_resolve(hdev, e);
3853 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3857 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the blacklist and
 * emit DEVICE_BLOCKED (skipping the requesting socket) on success.
 */
3861 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3864 struct mgmt_cp_block_device *cp = data;
3868 BT_DBG("%s", hdev->name);
3870 if (!bdaddr_type_is_valid(cp->addr.type))
3871 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3872 MGMT_STATUS_INVALID_PARAMS,
3873 &cp->addr, sizeof(cp->addr));
3877 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3879 status = MGMT_STATUS_FAILED;
3883 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3885 status = MGMT_STATUS_SUCCESS;
3888 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3889 &cp->addr, sizeof(cp->addr));
3891 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror of block_device(); a failed
 * blacklist removal maps to INVALID_PARAMS (entry was not blocked).
 */
3896 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3899 struct mgmt_cp_unblock_device *cp = data;
3903 BT_DBG("%s", hdev->name);
3905 if (!bdaddr_type_is_valid(cp->addr.type))
3906 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3907 MGMT_STATUS_INVALID_PARAMS,
3908 &cp->addr, sizeof(cp->addr));
3912 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3914 status = MGMT_STATUS_INVALID_PARAMS;
3918 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3920 status = MGMT_STATUS_SUCCESS;
3923 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3924 &cp->addr, sizeof(cp->addr));
3926 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record (source must be
 * 0x0000-0x0002), ack immediately, then run an HCI request so EIR/class
 * reflect the new device-ID record (queued calls not visible here).
 */
3931 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3934 struct mgmt_cp_set_device_id *cp = data;
3935 struct hci_request req;
3939 BT_DBG("%s", hdev->name);
3941 source = __le16_to_cpu(cp->source);
3943 if (source > 0x0002)
3944 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3945 MGMT_STATUS_INVALID_PARAMS);
3949 hdev->devid_source = source;
3950 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3951 hdev->devid_product = __le16_to_cpu(cp->product);
3952 hdev->devid_version = __le16_to_cpu(cp->version);
3954 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3956 hci_req_init(&req, hdev);
3958 hci_req_run(&req, NULL);
3960 hci_dev_unlock(hdev);
/* HCI request callback for SET_ADVERTISING: on failure answer all
 * pending commands with the mgmt error; on success send settings
 * responses and a NEW_SETTINGS event to everyone else.
 */
3965 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3967 struct cmd_lookup match = { NULL, hdev };
3970 u8 mgmt_err = mgmt_status(status);
3972 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3973 cmd_status_rsp, &mgmt_err);
3977 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3980 new_settings(hdev, match.sk);
/* MGMT_OP_SET_ADVERTISING handler: when no HCI traffic is needed
 * (powered off, value unchanged, or LE connections exist) just toggle
 * the flag and reply; otherwise queue enable/disable_advertising() and
 * complete via set_advertising_complete(). NOTE(review): sampled
 * listing — braces and goto labels are not visible here.
 */
3986 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3989 struct mgmt_mode *cp = data;
3990 struct pending_cmd *cmd;
3991 struct hci_request req;
3992 u8 val, enabled, status;
3995 BT_DBG("request for %s", hdev->name);
3997 status = mgmt_le_support(hdev);
3999 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4002 if (cp->val != 0x00 && cp->val != 0x01)
4003 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4004 MGMT_STATUS_INVALID_PARAMS);
4009 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4011 /* The following conditions are ones which mean that we should
4012 * not do any HCI communication but directly send a mgmt
4013 * response to user space (after toggling the flag if
4016 if (!hdev_is_powered(hdev) || val == enabled ||
4017 hci_conn_num(hdev, LE_LINK) > 0) {
4018 bool changed = false;
4020 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4021 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4025 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4030 err = new_settings(hdev, sk);
/* Serialize against in-flight ADVERTISING/LE toggles */
4035 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4036 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4037 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4042 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4048 hci_req_init(&req, hdev);
4051 enable_advertising(&req);
4053 disable_advertising(&req);
4055 err = hci_req_run(&req, set_advertising_complete);
4057 mgmt_pending_remove(cmd);
4060 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed while powered off on
 * an LE-capable controller. A non-ANY address must not be BDADDR_NONE
 * and must have the two most significant bits set (static random
 * address format per the Core spec).
 */
4064 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4065 void *data, u16 len)
4067 struct mgmt_cp_set_static_address *cp = data;
4070 BT_DBG("%s", hdev->name);
4072 if (!lmp_le_capable(hdev))
4073 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4074 MGMT_STATUS_NOT_SUPPORTED);
4076 if (hdev_is_powered(hdev))
4077 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4078 MGMT_STATUS_REJECTED);
4080 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4081 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4082 return cmd_status(sk, hdev->id,
4083 MGMT_OP_SET_STATIC_ADDRESS,
4084 MGMT_STATUS_INVALID_PARAMS);
4086 /* Two most significant bits shall be set */
4087 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4088 return cmd_status(sk, hdev->id,
4089 MGMT_OP_SET_STATIC_ADDRESS,
4090 MGMT_STATUS_INVALID_PARAMS);
4095 bacpy(&hdev->static_addr, &cp->bdaddr);
4097 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4099 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate interval/window against the
 * HCI-allowed range (0x0004-0x4000, window <= interval), store them,
 * and restart any running background scan so the new values take
 * effect.
 */
4104 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4105 void *data, u16 len)
4107 struct mgmt_cp_set_scan_params *cp = data;
4108 __u16 interval, window;
4111 BT_DBG("%s", hdev->name);
4113 if (!lmp_le_capable(hdev))
4114 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4115 MGMT_STATUS_NOT_SUPPORTED);
4117 interval = __le16_to_cpu(cp->interval);
4119 if (interval < 0x0004 || interval > 0x4000)
4120 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4121 MGMT_STATUS_INVALID_PARAMS);
4123 window = __le16_to_cpu(cp->window);
4125 if (window < 0x0004 || window > 0x4000)
4126 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4127 MGMT_STATUS_INVALID_PARAMS);
4129 if (window > interval)
4130 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4131 MGMT_STATUS_INVALID_PARAMS);
4135 hdev->le_scan_interval = interval;
4136 hdev->le_scan_window = window;
4138 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4140 /* If background scan is running, restart it so new parameters are
4143 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4144 hdev->discovery.state == DISCOVERY_STOPPED) {
4145 struct hci_request req;
4147 hci_req_init(&req, hdev);
4149 hci_req_add_le_scan_disable(&req);
4150 hci_req_add_le_passive_scan(&req);
4152 hci_req_run(&req, NULL);
4155 hci_dev_unlock(hdev);
/* HCI request callback for SET_FAST_CONNECTABLE: on failure report a
 * status; on success commit the HCI_FAST_CONNECTABLE flag from the
 * pending command's parameter and notify with NEW_SETTINGS.
 */
4160 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4162 struct pending_cmd *cmd;
4164 BT_DBG("status 0x%02x", status);
4168 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4173 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4174 mgmt_status(status));
4176 struct mgmt_mode *cp = cmd->param;
4179 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4181 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4183 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4184 new_settings(hdev, cmd->sk);
4187 mgmt_pending_remove(cmd);
4190 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled on a
 * >= 1.2 controller that is powered and connectable; short-circuits
 * when the flag already matches, otherwise queues
 * write_fast_connectable() and completes asynchronously.
 */
4193 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4194 void *data, u16 len)
4196 struct mgmt_mode *cp = data;
4197 struct pending_cmd *cmd;
4198 struct hci_request req;
4201 BT_DBG("%s", hdev->name);
4203 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4204 hdev->hci_ver < BLUETOOTH_VER_1_2)
4205 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4206 MGMT_STATUS_NOT_SUPPORTED);
4208 if (cp->val != 0x00 && cp->val != 0x01)
4209 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4210 MGMT_STATUS_INVALID_PARAMS);
4212 if (!hdev_is_powered(hdev))
4213 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4214 MGMT_STATUS_NOT_POWERED);
4216 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4217 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4218 MGMT_STATUS_REJECTED);
/* Only one toggle in flight at a time */
4222 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4223 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No-op when the requested value equals the current flag */
4228 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4229 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4234 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4241 hci_req_init(&req, hdev);
4243 write_fast_connectable(&req, cp->val);
4245 err = hci_req_run(&req, fast_connectable_complete);
4247 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4248 MGMT_STATUS_FAILED);
4249 mgmt_pending_remove(cmd);
4253 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable reflecting the CONNECTABLE/DISCOVERABLE
 * flags, first forcing fast-connectable off so page-scan parameters are
 * back to defaults.
 */
4258 static void set_bredr_scan(struct hci_request *req)
4260 struct hci_dev *hdev = req->hdev;
4263 /* Ensure that fast connectable is disabled. This function will
4264 * not do anything if the page scan parameters are already what
4267 write_fast_connectable(req, false);
4269 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4271 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4272 scan |= SCAN_INQUIRY;
4275 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request callback for SET_BREDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report a status; on
 * success answer with settings and broadcast NEW_SETTINGS.
 */
4278 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4280 struct pending_cmd *cmd;
4282 BT_DBG("status 0x%02x", status);
4286 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4291 u8 mgmt_err = mgmt_status(status);
4293 /* We need to restore the flag if related HCI commands
4296 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4298 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4300 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4301 new_settings(hdev, cmd->sk);
4304 mgmt_pending_remove(cmd);
4307 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: requires a dual-mode controller with LE
 * enabled. While powered off, enabling just flips flags (disabling also
 * clears the BR/EDR-dependent flags); while powered, disabling is
 * rejected and enabling sets the flag up front, then queues scan-enable
 * and advertising-data updates. NOTE(review): sampled listing — braces
 * and goto labels are not visible here.
 */
4310 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4312 struct mgmt_mode *cp = data;
4313 struct pending_cmd *cmd;
4314 struct hci_request req;
4317 BT_DBG("request for %s", hdev->name);
4319 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4320 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4321 MGMT_STATUS_NOT_SUPPORTED);
4323 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4324 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4325 MGMT_STATUS_REJECTED);
4327 if (cp->val != 0x00 && cp->val != 0x01)
4328 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4329 MGMT_STATUS_INVALID_PARAMS);
/* No-op when the flag already matches the requested value */
4333 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4334 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4338 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR invalidates all BR/EDR-only settings */
4340 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4341 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4342 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4343 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4344 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4347 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4349 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4353 err = new_settings(hdev, sk);
4357 /* Reject disabling when powered on */
4359 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4360 MGMT_STATUS_REJECTED);
4364 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4365 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4370 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4376 /* We need to flip the bit already here so that update_adv_data
4377 * generates the correct flags.
4379 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4381 hci_req_init(&req, hdev);
4383 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4384 set_bredr_scan(&req);
4386 /* Since only the advertising data flags will change, there
4387 * is no need to update the scan response data.
4389 update_adv_data(&req);
4391 err = hci_req_run(&req, set_bredr_complete);
4393 mgmt_pending_remove(cmd);
4396 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: val is 0x00 (off), 0x01 (SC enabled)
 * or 0x02 (SC only). When powered off this only updates HCI_SC_ENABLED/
 * HCI_SC_ONLY flags; when powered it sends Write Secure Connections
 * Host Support and updates SC_ONLY afterwards. Requires SC-capable
 * hardware unless HCI_FORCE_SC debug flag is set. NOTE(review):
 * sampled listing — braces and goto labels are not visible here.
 */
4400 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4401 void *data, u16 len)
4403 struct mgmt_mode *cp = data;
4404 struct pending_cmd *cmd;
4408 BT_DBG("request for %s", hdev->name);
4410 status = mgmt_bredr_support(hdev);
4412 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4415 if (!lmp_sc_capable(hdev) &&
4416 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4417 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4418 MGMT_STATUS_NOT_SUPPORTED);
4420 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4421 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4422 MGMT_STATUS_INVALID_PARAMS);
4426 if (!hdev_is_powered(hdev)) {
4430 changed = !test_and_set_bit(HCI_SC_ENABLED,
4432 if (cp->val == 0x02)
4433 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4435 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4437 changed = test_and_clear_bit(HCI_SC_ENABLED,
4439 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4442 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4447 err = new_settings(hdev, sk);
4452 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4453 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* No-op when both ENABLED and ONLY flags already match */
4460 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4461 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4462 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4466 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4472 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4474 mgmt_pending_remove(cmd);
4478 if (cp->val == 0x02)
4479 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4481 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4484 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 off, 0x01 keep debug keys,
 * 0x02 additionally use SSP debug mode. Updates HCI_KEEP_DEBUG_KEYS /
 * HCI_USE_DEBUG_KEYS and, when powered with SSP on and the use-flag
 * changed, sends Write SSP Debug Mode to the controller.
 */
4488 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4489 void *data, u16 len)
4491 struct mgmt_mode *cp = data;
4492 bool changed, use_changed;
4495 BT_DBG("request for %s", hdev->name);
4497 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4498 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4499 MGMT_STATUS_INVALID_PARAMS);
4504 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4507 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4510 if (cp->val == 0x02)
4511 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4514 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4517 if (hdev_is_powered(hdev) && use_changed &&
4518 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4519 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4520 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4521 sizeof(mode), &mode);
4524 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4529 err = new_settings(hdev, sk);
4532 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: only allowed while powered off on an
 * LE-capable controller. Enabling stores the supplied IRK and marks the
 * RPA expired so a fresh one is generated; disabling wipes the IRK.
 * Using this command also implies user space handles IRKs, hence
 * HCI_RPA_RESOLVING is set unconditionally.
 */
4536 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4539 struct mgmt_cp_set_privacy *cp = cp_data;
4543 BT_DBG("request for %s", hdev->name);
4545 if (!lmp_le_capable(hdev))
4546 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4547 MGMT_STATUS_NOT_SUPPORTED);
4549 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4550 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4551 MGMT_STATUS_INVALID_PARAMS);
4553 if (hdev_is_powered(hdev))
4554 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4555 MGMT_STATUS_REJECTED);
4559 /* If user space supports this command it is also expected to
4560 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4562 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4565 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4566 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4567 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4569 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4570 memset(hdev->irk, 0, sizeof(hdev->irk));
4571 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4574 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4579 err = new_settings(hdev, sk);
4582 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: public LE addresses are accepted;
 * random LE addresses must be static (top two bits set). NOTE(review):
 * the return statements are outside this sampled view.
 */
4586 static bool irk_is_valid(struct mgmt_irk_info *irk)
4588 switch (irk->addr.type) {
4589 case BDADDR_LE_PUBLIC:
4592 case BDADDR_LE_RANDOM:
4593 /* Two most significant bits shall be set */
4594 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the mgmt Load IRKs command: bounds-checks the declared key
 * count against both U16_MAX and the actual payload length, validates
 * every entry, then atomically replaces the SMP IRK store and marks the
 * controller as RPA-resolving.
 * NOTE(review): hci_dev_lock and some braces are elided in this listing.
 */
4602 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
/* Largest count whose payload can still be described by a u16 length. */
4605 struct mgmt_cp_load_irks *cp = cp_data;
4606 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4607 sizeof(struct mgmt_irk_info));
4608 u16 irk_count, expected_len;
4611 BT_DBG("request for %s", hdev->name);
4613 if (!lmp_le_capable(hdev))
4614 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4615 MGMT_STATUS_NOT_SUPPORTED);
4617 irk_count = __le16_to_cpu(cp->irk_count);
4618 if (irk_count > max_irk_count) {
4619 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4620 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4621 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must exactly match the received payload size. */
4624 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4625 if (expected_len != len) {
4626 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4628 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4629 MGMT_STATUS_INVALID_PARAMS);
4632 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* First pass: reject the whole command if any entry is invalid. */
4634 for (i = 0; i < irk_count; i++) {
4635 struct mgmt_irk_info *key = &cp->irks[i];
4637 if (!irk_is_valid(key))
4638 return cmd_status(sk, hdev->id,
4640 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: wipe the existing store and add all new IRKs. */
4645 hci_smp_irks_clear(hdev);
4647 for (i = 0; i < irk_count; i++) {
4648 struct mgmt_irk_info *irk = &cp->irks[i];
4651 if (irk->addr.type == BDADDR_LE_PUBLIC)
4652 addr_type = ADDR_LE_DEV_PUBLIC;
4654 addr_type = ADDR_LE_DEV_RANDOM;
4656 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space that loads IRKs is expected to resolve RPAs itself. */
4660 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4662 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4664 hci_dev_unlock(hdev);
/* Validate an LTK entry from Load Long Term Keys: master must be a
 * boolean, and the address must be LE public or LE static random
 * (top two bits of a random address set). Return paths are elided
 * in this listing.
 */
4669 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4671 if (key->master != 0x00 && key->master != 0x01)
4674 switch (key->addr.type) {
4675 case BDADDR_LE_PUBLIC:
4678 case BDADDR_LE_RANDOM:
4679 /* Two most significant bits shall be set */
4680 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for the mgmt Load Long Term Keys command: same two-pass shape
 * as load_irks — validate count/length and every entry first, then clear
 * and repopulate the SMP LTK store.
 * NOTE(review): hci_dev_lock, the master/slave selection for 'type',
 * default switch handling and some braces are elided in this listing.
 */
4688 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4689 void *cp_data, u16 len)
4691 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Largest count whose payload still fits in a u16 length field. */
4692 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4693 sizeof(struct mgmt_ltk_info));
4694 u16 key_count, expected_len;
4697 BT_DBG("request for %s", hdev->name);
4699 if (!lmp_le_capable(hdev))
4700 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4701 MGMT_STATUS_NOT_SUPPORTED);
4703 key_count = __le16_to_cpu(cp->key_count);
4704 if (key_count > max_key_count) {
4705 BT_ERR("load_ltks: too big key_count value %u", key_count);
4706 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4707 MGMT_STATUS_INVALID_PARAMS);
/* Declared key count must match the received payload exactly. */
4710 expected_len = sizeof(*cp) + key_count *
4711 sizeof(struct mgmt_ltk_info);
4712 if (expected_len != len) {
4713 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4715 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4716 MGMT_STATUS_INVALID_PARAMS);
4719 BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: all-or-nothing validation. */
4721 for (i = 0; i < key_count; i++) {
4722 struct mgmt_ltk_info *key = &cp->keys[i];
4724 if (!ltk_is_valid(key))
4725 return cmd_status(sk, hdev->id,
4726 MGMT_OP_LOAD_LONG_TERM_KEYS,
4727 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: replace the stored LTKs. */
4732 hci_smp_ltks_clear(hdev);
4734 for (i = 0; i < key_count; i++) {
4735 struct mgmt_ltk_info *key = &cp->keys[i];
4736 u8 type, addr_type, authenticated;
4738 if (key->addr.type == BDADDR_LE_PUBLIC)
4739 addr_type = ADDR_LE_DEV_PUBLIC;
4741 addr_type = ADDR_LE_DEV_RANDOM;
/* Key role selection partially elided; slave branch shown here. */
4746 type = SMP_LTK_SLAVE;
/* Map the mgmt key type onto the SMP authenticated flag. */
4748 switch (key->type) {
4749 case MGMT_LTK_UNAUTHENTICATED:
4750 authenticated = 0x00;
4752 case MGMT_LTK_AUTHENTICATED:
4753 authenticated = 0x01;
4759 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4760 authenticated, key->val, key->enc_size, key->ediv,
4764 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4767 hci_dev_unlock(hdev);
/* Context passed through mgmt_pending_foreach() when completing Get
 * Connection Information requests for one specific connection.
 * NOTE(review): a mgmt_status member is referenced by the users below
 * (match->mgmt_status) but its declaration is elided in this listing.
 */
4772 struct cmd_conn_lookup {
4773 struct hci_conn *conn;
4774 bool valid_tx_power;
/* Per-pending-command callback invoked via mgmt_pending_foreach(): for
 * every pending Get Conn Info request that matches the refreshed
 * connection, build the reply from the cached hci_conn values, send it,
 * drop the connection reference and remove the pending command.
 */
4778 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4780 struct cmd_conn_lookup *match = data;
4781 struct mgmt_cp_get_conn_info *cp;
4782 struct mgmt_rp_get_conn_info rp;
4783 struct hci_conn *conn = cmd->user_data;
/* Skip pending commands that refer to some other connection. */
4785 if (conn != match->conn)
4788 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
/* Echo the address from the original request back in the reply. */
4790 memset(&rp, 0, sizeof(rp));
4791 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4792 rp.addr.type = cp->addr.type;
4794 if (!match->mgmt_status) {
4795 rp.rssi = conn->rssi;
/* TX power is only reported when the refresh request fully succeeded. */
4797 if (match->valid_tx_power) {
4798 rp.tx_power = conn->tx_power;
4799 rp.max_tx_power = conn->max_tx_power;
4801 rp.tx_power = HCI_TX_POWER_INVALID;
4802 rp.max_tx_power = HCI_TX_POWER_INVALID;
4806 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4807 match->mgmt_status, &rp, sizeof(rp));
/* Release the reference taken when the command was queued. */
4809 hci_conn_drop(conn);
4811 mgmt_pending_remove(cmd);
/* HCI request completion callback for the RSSI / TX-power refresh issued
 * by get_conn_info(): recovers the connection handle from the last sent
 * command and answers all matching pending mgmt requests.
 * NOTE(review): hci_dev_lock and some error-path braces are elided in
 * this listing.
 */
4814 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4816 struct hci_cp_read_rssi *cp;
4817 struct hci_conn *conn;
4818 struct cmd_conn_lookup match;
4821 BT_DBG("status 0x%02x", status);
4825 /* TX power data is valid in case request completed successfully,
4826 * otherwise we assume it's not valid. At the moment we assume that
4827 * either both or none of current and max values are valid to keep code
4830 match.valid_tx_power = !status;
4832 /* Commands sent in request are either Read RSSI or Read Transmit Power
4833 * Level so we check which one was last sent to retrieve connection
4834 * handle. Both commands have handle as first parameter so it's safe to
4835 * cast data on the same command struct.
4837 * First command sent is always Read RSSI and we fail only if it fails.
4838 * In other case we simply override error to indicate success as we
4839 * already remembered if TX power value is actually valid.
4841 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4843 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4848 BT_ERR("invalid sent_cmd in response");
4852 handle = __le16_to_cpu(cp->handle);
4853 conn = hci_conn_hash_lookup_handle(hdev, handle);
4855 BT_ERR("unknown handle (%d) in response", handle);
4860 match.mgmt_status = mgmt_status(status);
4862 /* Cache refresh is complete, now reply for mgmt request for given
4865 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4866 get_conn_info_complete, &match);
4869 hci_dev_unlock(hdev);
/* Handler for the mgmt Get Connection Information command: validates the
 * address and power/connection state, then either replies immediately
 * from cached hci_conn values or issues Read RSSI / Read TX Power HCI
 * commands to refresh the cache first.
 * NOTE(review): hci_dev_lock, several gotos/labels, match.conn setup and
 * some braces are elided in this listing.
 */
4872 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4875 struct mgmt_cp_get_conn_info *cp = data;
4876 struct mgmt_rp_get_conn_info rp;
4877 struct hci_conn *conn;
4878 unsigned long conn_info_age;
4881 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address so every error path can reuse it. */
4883 memset(&rp, 0, sizeof(rp));
4884 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4885 rp.addr.type = cp->addr.type;
4887 if (!bdaddr_type_is_valid(cp->addr.type))
4888 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4889 MGMT_STATUS_INVALID_PARAMS,
4894 if (!hdev_is_powered(hdev)) {
4895 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4896 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look the connection up on the right transport (BR/EDR vs LE). */
4900 if (cp->addr.type == BDADDR_BREDR)
4901 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4904 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4906 if (!conn || conn->state != BT_CONNECTED) {
4907 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4908 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
4912 /* To avoid client trying to guess when to poll again for information we
4913 * calculate conn info age as random value between min/max set in hdev.
4915 conn_info_age = hdev->conn_info_min_age +
4916 prandom_u32_max(hdev->conn_info_max_age -
4917 hdev->conn_info_min_age);
4919 /* Query controller to refresh cached values if they are too old or were
4922 if (time_after(jiffies, conn->conn_info_timestamp +
4923 msecs_to_jiffies(conn_info_age)) ||
4924 !conn->conn_info_timestamp) {
4925 struct hci_request req;
4926 struct hci_cp_read_tx_power req_txp_cp;
4927 struct hci_cp_read_rssi req_rssi_cp;
4928 struct pending_cmd *cmd;
/* RSSI is always re-read; TX power reads are added conditionally. */
4930 hci_req_init(&req, hdev);
4931 req_rssi_cp.handle = cpu_to_le16(conn->handle);
4932 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
4935 /* For LE links TX power does not change thus we don't need to
4936 * query for it once value is known.
4938 if (!bdaddr_type_is_le(cp->addr.type) ||
4939 conn->tx_power == HCI_TX_POWER_INVALID) {
4940 req_txp_cp.handle = cpu_to_le16(conn->handle);
4941 req_txp_cp.type = 0x00;
4942 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4943 sizeof(req_txp_cp), &req_txp_cp);
4946 /* Max TX power needs to be read only once per connection */
4947 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
4948 req_txp_cp.handle = cpu_to_le16(conn->handle);
4949 req_txp_cp.type = 0x01;
4950 hci_req_add(&req, HCI_OP_READ_TX_POWER,
4951 sizeof(req_txp_cp), &req_txp_cp);
4954 err = hci_req_run(&req, conn_info_refresh_complete);
/* Queue the mgmt command; it is answered from the refresh callback. */
4958 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a connection reference for the async completion path. */
4965 hci_conn_hold(conn);
4966 cmd->user_data = conn;
4968 conn->conn_info_timestamp = jiffies;
4970 /* Cache is valid, just reply with values cached in hci_conn */
4971 rp.rssi = conn->rssi;
4972 rp.tx_power = conn->tx_power;
4973 rp.max_tx_power = conn->max_tx_power;
4975 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4976 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4980 hci_dev_unlock(hdev);
/* HCI request completion callback for Get Clock Information: recovers
 * the optional piconet connection from the last Read Clock command,
 * finds the matching pending mgmt command, fills in local (and, for a
 * connection, piconet) clock values and completes the request.
 * NOTE(review): hci_dev_lock, 'cp' assignment from cmd->param, several
 * NULL checks and braces are elided in this listing.
 */
4984 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
4986 struct mgmt_cp_get_clock_info *cp;
4987 struct mgmt_rp_get_clock_info rp;
4988 struct hci_cp_read_clock *hci_cp;
4989 struct pending_cmd *cmd;
4990 struct hci_conn *conn;
4992 BT_DBG("%s status %u", hdev->name, status);
4996 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means the piconet clock of a specific connection. */
5000 if (hci_cp->which) {
5001 u16 handle = __le16_to_cpu(hci_cp->handle);
5002 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* The pending command was keyed on the connection (or NULL). */
5007 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5013 memset(&rp, 0, sizeof(rp));
5014 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5019 rp.local_clock = cpu_to_le32(hdev->clock);
5022 rp.piconet_clock = cpu_to_le32(conn->clock);
5023 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5027 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5029 mgmt_pending_remove(cmd);
/* Drop the reference taken in get_clock_info() for the connection. */
5031 hci_conn_drop(conn);
5034 hci_dev_unlock(hdev);
/* Handler for the mgmt Get Clock Information command: BR/EDR only.
 * Always queues a Read Clock for the local clock; when a peer address is
 * given, also queues a piconet-clock read for that connection.
 * NOTE(review): hci_dev_lock, error gotos, the conn = NULL branch for
 * BDADDR_ANY and some braces are elided in this listing.
 */
5037 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5040 struct mgmt_cp_get_clock_info *cp = data;
5041 struct mgmt_rp_get_clock_info rp;
5042 struct hci_cp_read_clock hci_cp;
5043 struct pending_cmd *cmd;
5044 struct hci_request req;
5045 struct hci_conn *conn;
5048 BT_DBG("%s", hdev->name);
/* Pre-fill the reply address for all error paths. */
5050 memset(&rp, 0, sizeof(rp));
5051 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5052 rp.addr.type = cp->addr.type;
5054 if (cp->addr.type != BDADDR_BREDR)
5055 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5056 MGMT_STATUS_INVALID_PARAMS,
5061 if (!hdev_is_powered(hdev)) {
5062 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5063 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* A non-ANY address must name an existing, connected ACL link. */
5067 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5068 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5070 if (!conn || conn->state != BT_CONNECTED) {
5071 err = cmd_complete(sk, hdev->id,
5072 MGMT_OP_GET_CLOCK_INFO,
5073 MGMT_STATUS_NOT_CONNECTED,
5081 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5087 hci_req_init(&req, hdev);
/* which = 0x00 (zeroed struct): read the local clock. */
5089 memset(&hci_cp, 0, sizeof(hci_cp));
5090 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection for the async completion callback. */
5093 hci_conn_hold(conn);
5094 cmd->user_data = conn;
5096 hci_cp.handle = cpu_to_le16(conn->handle);
5097 hci_cp.which = 0x01; /* Piconet clock */
5098 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5101 err = hci_req_run(&req, get_clock_info_complete);
/* On failure to start the request, undo the pending command. */
5103 mgmt_pending_remove(cmd);
5106 hci_dev_unlock(hdev);
/* Emit the mgmt Device Added event for the given address/type/action,
 * skipping the socket that issued the originating command.
 * NOTE(review): the ev.action assignment is elided in this listing.
 */
5110 static void device_added(struct sock *sk, struct hci_dev *hdev,
5111 bdaddr_t *bdaddr, u8 type, u8 action)
5113 struct mgmt_ev_device_added ev;
5115 bacpy(&ev.addr.bdaddr, bdaddr);
5116 ev.addr.type = type;
5119 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for the mgmt Add Device command: accepts only a concrete LE
 * address and action 0x00/0x01, translates it into connection-parameter
 * auto-connect policy, and broadcasts Device Added on success.
 * NOTE(review): hci_dev_lock, the action->auto_conn if/else, error
 * status in the failure cmd_complete and unlock-goto label are elided
 * in this listing.
 */
5122 static int add_device(struct sock *sk, struct hci_dev *hdev,
5123 void *data, u16 len)
5125 struct mgmt_cp_add_device *cp = data;
5126 u8 auto_conn, addr_type;
5129 BT_DBG("%s", hdev->name);
5131 if (!bdaddr_type_is_le(cp->addr.type) ||
5132 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5133 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5134 MGMT_STATUS_INVALID_PARAMS,
5135 &cp->addr, sizeof(cp->addr));
/* 0x00 = background connect, 0x01 = always auto-connect. */
5137 if (cp->action != 0x00 && cp->action != 0x01)
5138 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5139 MGMT_STATUS_INVALID_PARAMS,
5140 &cp->addr, sizeof(cp->addr));
5144 if (cp->addr.type == BDADDR_LE_PUBLIC)
5145 addr_type = ADDR_LE_DEV_PUBLIC;
5147 addr_type = ADDR_LE_DEV_RANDOM;
5150 auto_conn = HCI_AUTO_CONN_ALWAYS;
5152 auto_conn = HCI_AUTO_CONN_REPORT;
5154 /* If the connection parameters don't exist for this device,
5155 * they will be created and configured with defaults.
5157 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5159 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5161 &cp->addr, sizeof(cp->addr));
5165 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5167 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5168 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5171 hci_dev_unlock(hdev);
/* Emit the mgmt Device Removed event for the given address/type,
 * skipping the socket that issued the originating command.
 */
5175 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5176 bdaddr_t *bdaddr, u8 type)
5178 struct mgmt_ev_device_removed ev;
5180 bacpy(&ev.addr.bdaddr, bdaddr);
5181 ev.addr.type = type;
5183 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for the mgmt Remove Device command: with a concrete LE address
 * it deletes that device's connection parameters (refusing entries that
 * are auto-connect-disabled, i.e. not created via Add Device); with
 * BDADDR_ANY and type 0 it clears all enabled entries.
 * NOTE(review): hci_dev_lock, goto labels and some else/brace lines are
 * elided in this listing.
 */
5186 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5187 void *data, u16 len)
5189 struct mgmt_cp_remove_device *cp = data;
5192 BT_DBG("%s", hdev->name);
5196 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5197 struct hci_conn_params *params;
5200 if (!bdaddr_type_is_le(cp->addr.type)) {
5201 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5202 MGMT_STATUS_INVALID_PARAMS,
5203 &cp->addr, sizeof(cp->addr));
5207 if (cp->addr.type == BDADDR_LE_PUBLIC)
5208 addr_type = ADDR_LE_DEV_PUBLIC;
5210 addr_type = ADDR_LE_DEV_RANDOM;
5212 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5215 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5216 MGMT_STATUS_INVALID_PARAMS,
5217 &cp->addr, sizeof(cp->addr));
/* Disabled entries were not added by Add Device; refuse them. */
5221 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5222 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5223 MGMT_STATUS_INVALID_PARAMS,
5224 &cp->addr, sizeof(cp->addr));
5228 list_del_init(&params->action);
5229 list_del(&params->list);
/* Removing a device may change what passive scanning is needed. */
5231 hci_update_background_scan(hdev);
5233 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY wildcard removal requires address type 0. */
5235 if (cp->addr.type) {
5236 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5237 MGMT_STATUS_INVALID_PARAMS,
5238 &cp->addr, sizeof(cp->addr));
5242 hci_conn_params_clear_enabled(hdev);
5245 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5246 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5249 hci_dev_unlock(hdev);
/* Handler for the mgmt Load Connection Parameters command: validates the
 * count/length, clears the disabled-entry store and loads each parameter
 * set, skipping (not failing on) invalid entries.
 * NOTE(review): hci_dev_lock and some 'continue'/brace lines are elided
 * in this listing.
 */
5253 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5256 struct mgmt_cp_load_conn_param *cp = data;
/* Largest count whose payload still fits in a u16 length field. */
5257 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5258 sizeof(struct mgmt_conn_param));
5259 u16 param_count, expected_len;
5262 if (!lmp_le_capable(hdev))
5263 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5264 MGMT_STATUS_NOT_SUPPORTED);
5266 param_count = __le16_to_cpu(cp->param_count);
5267 if (param_count > max_param_count) {
5268 BT_ERR("load_conn_param: too big param_count value %u",
5270 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5271 MGMT_STATUS_INVALID_PARAMS);
/* Declared count must exactly match the received payload size. */
5274 expected_len = sizeof(*cp) + param_count *
5275 sizeof(struct mgmt_conn_param);
5276 if (expected_len != len) {
5277 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5279 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5280 MGMT_STATUS_INVALID_PARAMS);
5283 BT_DBG("%s param_count %u", hdev->name, param_count);
5287 hci_conn_params_clear_disabled(hdev);
5289 for (i = 0; i < param_count; i++) {
5290 struct mgmt_conn_param *param = &cp->params[i];
5291 struct hci_conn_params *hci_param;
5292 u16 min, max, latency, timeout;
5295 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5298 if (param->addr.type == BDADDR_LE_PUBLIC) {
5299 addr_type = ADDR_LE_DEV_PUBLIC;
5300 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5301 addr_type = ADDR_LE_DEV_RANDOM;
/* Bad entries are logged and skipped, not treated as fatal. */
5303 BT_ERR("Ignoring invalid connection parameters");
5307 min = le16_to_cpu(param->min_interval);
5308 max = le16_to_cpu(param->max_interval);
5309 latency = le16_to_cpu(param->latency);
5310 timeout = le16_to_cpu(param->timeout);
5312 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5313 min, max, latency, timeout);
5315 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5316 BT_ERR("Ignoring invalid connection parameters");
5320 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5323 BT_ERR("Failed to add connection parameters");
5327 hci_param->conn_min_interval = min;
5328 hci_param->conn_max_interval = max;
5329 hci_param->conn_latency = latency;
5330 hci_param->supervision_timeout = timeout;
5333 hci_dev_unlock(hdev);
5335 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode — entry
 * order must exactly match the opcode numbering. Each entry gives the
 * handler, whether the payload is variable-length (var_len: true means
 * 'len' is a minimum; false means it must match exactly) and the
 * expected fixed size. struct field declarations are partially elided
 * in this listing.
 */
5338 static const struct mgmt_handler {
5339 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5343 } mgmt_handlers[] = {
5344 { NULL }, /* 0x0000 (no command) */
5345 { read_version, false, MGMT_READ_VERSION_SIZE },
5346 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5347 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5348 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5349 { set_powered, false, MGMT_SETTING_SIZE },
5350 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5351 { set_connectable, false, MGMT_SETTING_SIZE },
5352 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5353 { set_pairable, false, MGMT_SETTING_SIZE },
5354 { set_link_security, false, MGMT_SETTING_SIZE },
5355 { set_ssp, false, MGMT_SETTING_SIZE },
5356 { set_hs, false, MGMT_SETTING_SIZE },
5357 { set_le, false, MGMT_SETTING_SIZE },
5358 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5359 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5360 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5361 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5362 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5363 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5364 { disconnect, false, MGMT_DISCONNECT_SIZE },
5365 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5366 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5367 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5368 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5369 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5370 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5371 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5372 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5373 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5374 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5375 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5376 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5377 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5378 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5379 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5380 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5381 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5382 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5383 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5384 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5385 { set_advertising, false, MGMT_SETTING_SIZE },
5386 { set_bredr, false, MGMT_SETTING_SIZE },
5387 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5388 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5389 { set_secure_conn, false, MGMT_SETTING_SIZE },
5390 { set_debug_keys, false, MGMT_SETTING_SIZE },
5391 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5392 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5393 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5394 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5395 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5396 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5397 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5398 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5399 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
/* Entry point for mgmt control messages from user space: copies the
 * message, parses the header, resolves and sanity-checks the controller
 * index, validates the opcode against the dispatch table and the payload
 * length against the handler's size rules, then invokes the handler.
 * NOTE(review): header assignment from buf, several error gotos, the
 * kfree/done epilogue and some braces are elided in this listing.
 */
5402 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5406 struct mgmt_hdr *hdr;
5407 u16 opcode, index, len;
5408 struct hci_dev *hdev = NULL;
5409 const struct mgmt_handler *handler;
5412 BT_DBG("got %zu bytes", msglen);
5414 if (msglen < sizeof(*hdr))
5417 buf = kmalloc(msglen, GFP_KERNEL);
5421 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5427 opcode = __le16_to_cpu(hdr->opcode);
5428 index = __le16_to_cpu(hdr->index);
5429 len = __le16_to_cpu(hdr->len);
/* The header's declared length must match the remaining payload. */
5431 if (len != msglen - sizeof(*hdr)) {
5436 if (index != MGMT_INDEX_NONE) {
5437 hdev = hci_dev_get(index);
5439 err = cmd_status(sk, index, opcode,
5440 MGMT_STATUS_INVALID_INDEX);
/* Devices still in setup, unconfigured or claimed by a user
 * channel are not addressable through this interface. */
5444 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5445 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
5446 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5447 err = cmd_status(sk, index, opcode,
5448 MGMT_STATUS_INVALID_INDEX);
5453 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5454 mgmt_handlers[opcode].func == NULL) {
5455 BT_DBG("Unknown op %u", opcode);
5456 err = cmd_status(sk, index, opcode,
5457 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global (index-less) commands must not carry an index, and
 * per-device commands must carry one. */
5461 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5462 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5463 err = cmd_status(sk, index, opcode,
5464 MGMT_STATUS_INVALID_INDEX);
5468 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5469 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5470 err = cmd_status(sk, index, opcode,
5471 MGMT_STATUS_INVALID_INDEX);
5475 handler = &mgmt_handlers[opcode];
/* var_len handlers take data_len as a minimum; fixed handlers
 * require the length to match exactly. */
5477 if ((handler->var_len && len < handler->data_len) ||
5478 (!handler->var_len && len != handler->data_len)) {
5479 err = cmd_status(sk, index, opcode,
5480 MGMT_STATUS_INVALID_PARAMS);
5485 mgmt_init_hdev(sk, hdev);
5487 cp = buf + sizeof(*hdr);
5489 err = handler->func(sk, hdev, cp, len);
/* Announce a new controller index: BR/EDR, non-raw devices only.
 * Unconfigured controllers get Unconf Index Added, others Index Added.
 * The early 'return' statements are elided in this listing.
 */
5503 void mgmt_index_added(struct hci_dev *hdev)
5505 if (hdev->dev_type != HCI_BREDR)
5508 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5511 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5512 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5514 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce a removed controller index: fails all pending commands with
 * Invalid Index, then emits the matching (Unconf) Index Removed event.
 * The early 'return' statements are elided in this listing.
 */
5517 void mgmt_index_removed(struct hci_dev *hdev)
5519 u8 status = MGMT_STATUS_INVALID_INDEX;
5521 if (hdev->dev_type != HCI_BREDR)
5524 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* opcode 0 matches every pending command for this device. */
5527 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5529 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5530 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5532 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5535 /* This function requires the caller holds hdev->lock */
/* Re-queue pending LE connections for all always-auto-connect entries
 * after power-on, then restart background scanning only if no entry
 * already did so. Loop braces and the 'found' flag are elided in this
 * listing.
 */
5536 static void restart_le_auto_conns(struct hci_dev *hdev)
5538 struct hci_conn_params *p;
5541 list_for_each_entry(p, &hdev->le_conn_params, list) {
5542 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS) {
5543 hci_pend_le_conn_add(hdev, p);
5548 /* Calling hci_pend_le_conn_add will actually already trigger
5549 * background scanning when needed. So no need to trigger it
5550 * just another time.
5552 * This check is here to avoid an unneeded restart of the
5553 * passive scanning. Since this is during the controller
5554 * power up phase the duplicate filtering is not an issue.
5559 hci_update_background_scan(hdev);
/* HCI request completion callback for the power-on sequence issued by
 * powered_update_hci(): restarts LE auto-connections, answers pending
 * Set Powered commands and broadcasts New Settings.
 * NOTE(review): hci_dev_lock and sock_put on match.sk are elided in
 * this listing.
 */
5562 static void powered_complete(struct hci_dev *hdev, u8 status)
5564 struct cmd_lookup match = { NULL, hdev };
5566 BT_DBG("status 0x%02x", status);
5570 restart_le_auto_conns(hdev);
5572 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5574 new_settings(hdev, match.sk);
5576 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller state in line
 * with the mgmt settings after power-on: SSP mode, LE host support,
 * advertising data, auth enable, scan mode and device class/name.
 * Returns the hci_req_run() result (0 when the request was queued).
 * NOTE(review): local declarations (ssp, link_sec), cp.le assignment,
 * update_class/update_name calls and braces are elided in this listing.
 */
5582 static int powered_update_hci(struct hci_dev *hdev)
5584 struct hci_request req;
5587 hci_req_init(&req, hdev);
/* Sync the controller's SSP mode with the mgmt SSP setting. */
5589 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5590 !lmp_host_ssp_capable(hdev)) {
5593 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5596 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5597 lmp_bredr_capable(hdev)) {
5598 struct hci_cp_write_le_host_supported cp;
5601 cp.simul = lmp_le_br_capable(hdev);
5603 /* Check first if we already have the right
5604 * host state (host features set)
5606 if (cp.le != lmp_host_le_capable(hdev) ||
5607 cp.simul != lmp_host_le_br_capable(hdev))
5608 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5612 if (lmp_le_capable(hdev)) {
5613 /* Make sure the controller has a good default for
5614 * advertising data. This also applies to the case
5615 * where BR/EDR was toggled during the AUTO_OFF phase.
5617 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5618 update_adv_data(&req);
5619 update_scan_rsp_data(&req);
5622 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5623 enable_advertising(&req);
/* Only write auth enable when it differs from the HCI state. */
5626 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5627 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5628 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5629 sizeof(link_sec), &link_sec);
5631 if (lmp_bredr_capable(hdev)) {
5632 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5633 set_bredr_scan(&req);
5639 return hci_req_run(&req, powered_complete);
/* Notify mgmt of a power state change: on power-on, defer responses to
 * powered_complete() if the update request was queued; on power-off,
 * fail all other pending commands, signal a zeroed class of device and
 * broadcast New Settings.
 * NOTE(review): the powered-on/off branch structure, return value and
 * sock_put are elided in this listing.
 */
5642 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5644 struct cmd_lookup match = { NULL, hdev };
5645 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5646 u8 zero_cod[] = { 0, 0, 0 };
5649 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* 0 means the power-on HCI request was queued; its completion
 * callback will answer the pending Set Powered commands. */
5653 if (powered_update_hci(hdev) == 0)
5656 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5661 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5662 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5664 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5665 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5666 zero_cod, sizeof(zero_cod), NULL);
5669 err = new_settings(hdev, match.sk);
/* Fail a pending Set Powered command after a power-on error, mapping
 * -ERFKILL to the dedicated RFKILLED status. The early return when no
 * pending command exists is elided in this listing.
 */
5677 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5679 struct pending_cmd *cmd;
5682 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5686 if (err == -ERFKILL)
5687 status = MGMT_STATUS_RFKILLED;
5689 status = MGMT_STATUS_FAILED;
5691 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5693 mgmt_pending_remove(cmd);
/* Discoverable timer expiry: clears the discoverable flags, drops
 * inquiry scan (keeping page scan) on BR/EDR, refreshes advertising
 * data and broadcasts New Settings.
 * NOTE(review): hci_dev_lock is elided in this listing.
 */
5696 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5698 struct hci_request req;
5702 /* When discoverable timeout triggers, then just make sure
5703 * the limited discoverable flag is cleared. Even in the case
5704 * of a timeout triggered from general discoverable, it is
5705 * safe to unconditionally clear the flag.
5707 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5708 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5710 hci_req_init(&req, hdev);
5711 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* SCAN_PAGE only: stay connectable but stop inquiry scan. */
5712 u8 scan = SCAN_PAGE;
5713 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5714 sizeof(scan), &scan);
/* Advertising flags may carry discoverable bits; refresh them. */
5717 update_adv_data(&req);
5718 hci_req_run(&req, NULL);
5720 hdev->discov_timeout = 0;
5722 new_settings(hdev, NULL);
5724 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with a controller-reported change,
 * unless a pending Set Discoverable / power-off command will handle it.
 * On an actual change, refresh advertising data and broadcast New
 * Settings. Early returns and the if (changed) brace are elided in this
 * listing.
 */
5727 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5731 /* Nothing needed here if there's a pending command since that
5732 * commands request completion callback takes care of everything
5735 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5738 /* Powering off may clear the scan mode - don't let that interfere */
5739 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5743 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5745 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5746 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5750 struct hci_request req;
5752 /* In case this change in discoverable was triggered by
5753 * a disabling of connectable there could be a need to
5754 * update the advertising flags.
5756 hci_req_init(&req, hdev);
5757 update_adv_data(&req);
5758 hci_req_run(&req, NULL);
5760 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with a controller-reported change,
 * unless a pending Set Connectable / power-off command will handle it;
 * broadcast New Settings when the flag actually changed. Early returns
 * and the if (changed) guard are elided in this listing.
 */
5764 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5768 /* Nothing needed here if there's a pending command since that
5769 * commands request completion callback takes care of everything
5772 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5775 /* Powering off may clear the scan mode - don't let that interfere */
5776 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5780 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5782 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
5785 new_settings(hdev, NULL);
/* Mirror a controller-reported advertising state change into the
 * HCI_ADVERTISING flag, unless a pending power-off explains it.
 * The early return and if/else lines are elided in this listing.
 */
5788 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
5790 /* Powering off may stop advertising - don't let that interfere */
5791 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5795 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
5797 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* A Write Scan Enable command failed: propagate the mapped error status
 * to whichever pending mgmt command requested the affected scan mode
 * (page scan -> Set Connectable, inquiry scan -> Set Discoverable).
 */
5800 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
5802 u8 mgmt_err = mgmt_status(status);
5804 if (scan & SCAN_PAGE)
5805 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
5806 cmd_status_rsp, &mgmt_err);
5808 if (scan & SCAN_INQUIRY)
5809 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
5810 cmd_status_rsp, &mgmt_err);
/* Emit the mgmt New Link Key event for a freshly created BR/EDR link
 * key, with store_hint telling user space whether to persist it.
 */
5813 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5816 struct mgmt_ev_new_link_key ev;
5818 memset(&ev, 0, sizeof(ev));
5820 ev.store_hint = persistent;
5821 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5822 ev.key.addr.type = BDADDR_BREDR;
5823 ev.key.type = key->type;
5824 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5825 ev.key.pin_len = key->pin_len;
5827 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's authenticated flag onto the mgmt LTK type constant. */
5830 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5832 if (ltk->authenticated)
5833 return MGMT_LTK_AUTHENTICATED;
5835 return MGMT_LTK_UNAUTHENTICATED;
/* Emit the mgmt New Long Term Key event. store_hint is forced to 0 for
 * non-identity (resolvable/non-resolvable) random addresses since those
 * keys cannot be usefully persisted.
 * NOTE(review): the if/else around ev.store_hint and the SMP_LTK master
 * handling are partially elided in this listing.
 */
5838 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5840 struct mgmt_ev_new_long_term_key ev;
5842 memset(&ev, 0, sizeof(ev));
5844 /* Devices using resolvable or non-resolvable random addresses
5845 * without providing an indentity resolving key don't require
5846 * to store long term keys. Their addresses will change the
5849 * Only when a remote device provides an identity address
5850 * make sure the long term key is stored. If the remote
5851 * identity is known, the long term keys are internally
5852 * mapped to the identity address. So allow static random
5853 * and public addresses here.
5855 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5856 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5857 ev.store_hint = 0x00;
5859 ev.store_hint = persistent;
5861 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5862 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5863 ev.key.type = mgmt_ltk_type(key);
5864 ev.key.enc_size = key->enc_size;
5865 ev.key.ediv = key->ediv;
5866 ev.key.rand = key->rand;
/* SMP_LTK marks the master role key (handling partially elided). */
5868 if (key->type == SMP_LTK)
5871 memcpy(ev.key.val, key->val, sizeof(key->val));
5873 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit the mgmt New IRK event. Persisting is hinted only for devices
 * actually using an RPA (irk->rpa != BDADDR_ANY); identity-addressed
 * devices don't need their IRK stored.
 */
5876 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5878 struct mgmt_ev_new_irk ev;
5880 memset(&ev, 0, sizeof(ev));
5882 /* For identity resolving keys from devices that are already
5883 * using a public address or static random address, do not
5884 * ask for storing this key. The identity resolving key really
5885 * is only mandatory for devices using resovlable random
5888 * Storing all identity resolving keys has the downside that
5889 * they will be also loaded on next boot of they system. More
5890 * identity resolving keys, means more time during scanning is
5891 * needed to actually resolve these addresses.
5893 if (bacmp(&irk->rpa, BDADDR_ANY))
5894 ev.store_hint = 0x01;
5896 ev.store_hint = 0x00;
5898 bacpy(&ev.rpa, &irk->rpa);
5899 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5900 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5901 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5903 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
5906 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
5909 struct mgmt_ev_new_csrk ev;
5911 memset(&ev, 0, sizeof(ev));
5913 /* Devices using resolvable or non-resolvable random addresses
5914 * without providing an indentity resolving key don't require
5915 * to store signature resolving keys. Their addresses will change
5916 * the next time around.
5918 * Only when a remote device provides an identity address
5919 * make sure the signature resolving key is stored. So allow
5920 * static random and public addresses here.
5922 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5923 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
5924 ev.store_hint = 0x00;
5926 ev.store_hint = persistent;
5928 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
5929 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
5930 ev.key.master = csrk->master;
5931 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
5933 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
5936 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
5937 u8 bdaddr_type, u8 store_hint, u16 min_interval,
5938 u16 max_interval, u16 latency, u16 timeout)
5940 struct mgmt_ev_new_conn_param ev;
5942 if (!hci_is_identity_address(bdaddr, bdaddr_type))
5945 memset(&ev, 0, sizeof(ev));
5946 bacpy(&ev.addr.bdaddr, bdaddr);
5947 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
5948 ev.store_hint = store_hint;
5949 ev.min_interval = cpu_to_le16(min_interval);
5950 ev.max_interval = cpu_to_le16(max_interval);
5951 ev.latency = cpu_to_le16(latency);
5952 ev.timeout = cpu_to_le16(timeout);
5954 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
5957 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5960 eir[eir_len++] = sizeof(type) + data_len;
5961 eir[eir_len++] = type;
5962 memcpy(&eir[eir_len], data, data_len);
5963 eir_len += data_len;
5968 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5969 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5973 struct mgmt_ev_device_connected *ev = (void *) buf;
5976 bacpy(&ev->addr.bdaddr, bdaddr);
5977 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5979 ev->flags = __cpu_to_le32(flags);
5982 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
5985 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5986 eir_len = eir_append_data(ev->eir, eir_len,
5987 EIR_CLASS_OF_DEV, dev_class, 3);
5989 ev->eir_len = cpu_to_le16(eir_len);
5991 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5992 sizeof(*ev) + eir_len, NULL);
5995 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5997 struct mgmt_cp_disconnect *cp = cmd->param;
5998 struct sock **sk = data;
5999 struct mgmt_rp_disconnect rp;
6001 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6002 rp.addr.type = cp->addr.type;
6004 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6010 mgmt_pending_remove(cmd);
6013 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6015 struct hci_dev *hdev = data;
6016 struct mgmt_cp_unpair_device *cp = cmd->param;
6017 struct mgmt_rp_unpair_device rp;
6019 memset(&rp, 0, sizeof(rp));
6020 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6021 rp.addr.type = cp->addr.type;
6023 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6025 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6027 mgmt_pending_remove(cmd);
6030 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6031 u8 link_type, u8 addr_type, u8 reason,
6032 bool mgmt_connected)
6034 struct mgmt_ev_device_disconnected ev;
6035 struct pending_cmd *power_off;
6036 struct sock *sk = NULL;
6038 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6040 struct mgmt_mode *cp = power_off->param;
6042 /* The connection is still in hci_conn_hash so test for 1
6043 * instead of 0 to know if this is the last one.
6045 if (!cp->val && hci_conn_count(hdev) == 1) {
6046 cancel_delayed_work(&hdev->power_off);
6047 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6051 if (!mgmt_connected)
6054 if (link_type != ACL_LINK && link_type != LE_LINK)
6057 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6059 bacpy(&ev.addr.bdaddr, bdaddr);
6060 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6063 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6068 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6072 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6073 u8 link_type, u8 addr_type, u8 status)
6075 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6076 struct mgmt_cp_disconnect *cp;
6077 struct mgmt_rp_disconnect rp;
6078 struct pending_cmd *cmd;
6080 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6083 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6089 if (bacmp(bdaddr, &cp->addr.bdaddr))
6092 if (cp->addr.type != bdaddr_type)
6095 bacpy(&rp.addr.bdaddr, bdaddr);
6096 rp.addr.type = bdaddr_type;
6098 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6099 mgmt_status(status), &rp, sizeof(rp));
6101 mgmt_pending_remove(cmd);
6104 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6105 u8 addr_type, u8 status)
6107 struct mgmt_ev_connect_failed ev;
6108 struct pending_cmd *power_off;
6110 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6112 struct mgmt_mode *cp = power_off->param;
6114 /* The connection is still in hci_conn_hash so test for 1
6115 * instead of 0 to know if this is the last one.
6117 if (!cp->val && hci_conn_count(hdev) == 1) {
6118 cancel_delayed_work(&hdev->power_off);
6119 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6123 bacpy(&ev.addr.bdaddr, bdaddr);
6124 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6125 ev.status = mgmt_status(status);
6127 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6130 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6132 struct mgmt_ev_pin_code_request ev;
6134 bacpy(&ev.addr.bdaddr, bdaddr);
6135 ev.addr.type = BDADDR_BREDR;
6138 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6141 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6144 struct pending_cmd *cmd;
6145 struct mgmt_rp_pin_code_reply rp;
6147 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6151 bacpy(&rp.addr.bdaddr, bdaddr);
6152 rp.addr.type = BDADDR_BREDR;
6154 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6155 mgmt_status(status), &rp, sizeof(rp));
6157 mgmt_pending_remove(cmd);
6160 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6163 struct pending_cmd *cmd;
6164 struct mgmt_rp_pin_code_reply rp;
6166 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6170 bacpy(&rp.addr.bdaddr, bdaddr);
6171 rp.addr.type = BDADDR_BREDR;
6173 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6174 mgmt_status(status), &rp, sizeof(rp));
6176 mgmt_pending_remove(cmd);
6179 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6180 u8 link_type, u8 addr_type, u32 value,
6183 struct mgmt_ev_user_confirm_request ev;
6185 BT_DBG("%s", hdev->name);
6187 bacpy(&ev.addr.bdaddr, bdaddr);
6188 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6189 ev.confirm_hint = confirm_hint;
6190 ev.value = cpu_to_le32(value);
6192 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6196 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6197 u8 link_type, u8 addr_type)
6199 struct mgmt_ev_user_passkey_request ev;
6201 BT_DBG("%s", hdev->name);
6203 bacpy(&ev.addr.bdaddr, bdaddr);
6204 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6206 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6210 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6211 u8 link_type, u8 addr_type, u8 status,
6214 struct pending_cmd *cmd;
6215 struct mgmt_rp_user_confirm_reply rp;
6218 cmd = mgmt_pending_find(opcode, hdev);
6222 bacpy(&rp.addr.bdaddr, bdaddr);
6223 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6224 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6227 mgmt_pending_remove(cmd);
6232 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6233 u8 link_type, u8 addr_type, u8 status)
6235 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6236 status, MGMT_OP_USER_CONFIRM_REPLY);
6239 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6240 u8 link_type, u8 addr_type, u8 status)
6242 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6244 MGMT_OP_USER_CONFIRM_NEG_REPLY);
6247 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6248 u8 link_type, u8 addr_type, u8 status)
6250 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6251 status, MGMT_OP_USER_PASSKEY_REPLY);
6254 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6255 u8 link_type, u8 addr_type, u8 status)
6257 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6259 MGMT_OP_USER_PASSKEY_NEG_REPLY);
6262 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6263 u8 link_type, u8 addr_type, u32 passkey,
6266 struct mgmt_ev_passkey_notify ev;
6268 BT_DBG("%s", hdev->name);
6270 bacpy(&ev.addr.bdaddr, bdaddr);
6271 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6272 ev.passkey = __cpu_to_le32(passkey);
6273 ev.entered = entered;
6275 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6278 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6279 u8 addr_type, u8 status)
6281 struct mgmt_ev_auth_failed ev;
6283 bacpy(&ev.addr.bdaddr, bdaddr);
6284 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6285 ev.status = mgmt_status(status);
6287 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6290 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6292 struct cmd_lookup match = { NULL, hdev };
6296 u8 mgmt_err = mgmt_status(status);
6297 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6298 cmd_status_rsp, &mgmt_err);
6302 if (test_bit(HCI_AUTH, &hdev->flags))
6303 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6306 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6309 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6313 new_settings(hdev, match.sk);
6319 static void clear_eir(struct hci_request *req)
6321 struct hci_dev *hdev = req->hdev;
6322 struct hci_cp_write_eir cp;
6324 if (!lmp_ext_inq_capable(hdev))
6327 memset(hdev->eir, 0, sizeof(hdev->eir));
6329 memset(&cp, 0, sizeof(cp));
6331 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6334 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6336 struct cmd_lookup match = { NULL, hdev };
6337 struct hci_request req;
6338 bool changed = false;
6341 u8 mgmt_err = mgmt_status(status);
6343 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6344 &hdev->dev_flags)) {
6345 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6346 new_settings(hdev, NULL);
6349 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6355 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6357 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6359 changed = test_and_clear_bit(HCI_HS_ENABLED,
6362 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6365 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6368 new_settings(hdev, match.sk);
6373 hci_req_init(&req, hdev);
6375 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6376 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6377 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6378 sizeof(enable), &enable);
6384 hci_req_run(&req, NULL);
6387 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6389 struct cmd_lookup match = { NULL, hdev };
6390 bool changed = false;
6393 u8 mgmt_err = mgmt_status(status);
6396 if (test_and_clear_bit(HCI_SC_ENABLED,
6398 new_settings(hdev, NULL);
6399 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6402 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6403 cmd_status_rsp, &mgmt_err);
6408 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6410 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6411 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6414 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6415 settings_rsp, &match);
6418 new_settings(hdev, match.sk);
6424 static void sk_lookup(struct pending_cmd *cmd, void *data)
6426 struct cmd_lookup *match = data;
6428 if (match->sk == NULL) {
6429 match->sk = cmd->sk;
6430 sock_hold(match->sk);
6434 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6437 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6439 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6440 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6441 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6444 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6451 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6453 struct mgmt_cp_set_local_name ev;
6454 struct pending_cmd *cmd;
6459 memset(&ev, 0, sizeof(ev));
6460 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6461 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6463 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6465 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6467 /* If this is a HCI command related to powering on the
6468 * HCI dev don't send any mgmt signals.
6470 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6474 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6475 cmd ? cmd->sk : NULL);
6478 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6479 u8 *randomizer192, u8 *hash256,
6480 u8 *randomizer256, u8 status)
6482 struct pending_cmd *cmd;
6484 BT_DBG("%s status %u", hdev->name, status);
6486 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6491 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6492 mgmt_status(status));
6494 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6495 hash256 && randomizer256) {
6496 struct mgmt_rp_read_local_oob_ext_data rp;
6498 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6499 memcpy(rp.randomizer192, randomizer192,
6500 sizeof(rp.randomizer192));
6502 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6503 memcpy(rp.randomizer256, randomizer256,
6504 sizeof(rp.randomizer256));
6506 cmd_complete(cmd->sk, hdev->id,
6507 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6510 struct mgmt_rp_read_local_oob_data rp;
6512 memcpy(rp.hash, hash192, sizeof(rp.hash));
6513 memcpy(rp.randomizer, randomizer192,
6514 sizeof(rp.randomizer));
6516 cmd_complete(cmd->sk, hdev->id,
6517 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6522 mgmt_pending_remove(cmd);
6525 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6526 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6527 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6530 struct mgmt_ev_device_found *ev = (void *) buf;
6531 struct smp_irk *irk;
6534 /* Don't send events for a non-kernel initiated discovery. With
6535 * LE one exception is if we have pend_le_reports > 0 in which
6536 * case we're doing passive scanning and want these events.
6538 if (!hci_discovery_active(hdev)) {
6539 if (link_type == ACL_LINK)
6541 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6545 /* Make sure that the buffer is big enough. The 5 extra bytes
6546 * are for the potential CoD field.
6548 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6551 memset(buf, 0, sizeof(buf));
6553 irk = hci_get_irk(hdev, bdaddr, addr_type);
6555 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
6556 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
6558 bacpy(&ev->addr.bdaddr, bdaddr);
6559 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6563 ev->flags = cpu_to_le32(flags);
6566 memcpy(ev->eir, eir, eir_len);
6568 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6569 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6572 if (scan_rsp_len > 0)
6573 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6575 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6576 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6578 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
6581 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6582 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6584 struct mgmt_ev_device_found *ev;
6585 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6588 ev = (struct mgmt_ev_device_found *) buf;
6590 memset(buf, 0, sizeof(buf));
6592 bacpy(&ev->addr.bdaddr, bdaddr);
6593 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6596 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6599 ev->eir_len = cpu_to_le16(eir_len);
6601 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6604 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6606 struct mgmt_ev_discovering ev;
6607 struct pending_cmd *cmd;
6609 BT_DBG("%s discovering %u", hdev->name, discovering);
6612 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6614 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6617 u8 type = hdev->discovery.type;
6619 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6621 mgmt_pending_remove(cmd);
6624 memset(&ev, 0, sizeof(ev));
6625 ev.type = hdev->discovery.type;
6626 ev.discovering = discovering;
6628 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6631 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6633 BT_DBG("%s status %u", hdev->name, status);
6635 /* Clear the advertising mgmt setting if we failed to re-enable it */
6637 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6638 new_settings(hdev, NULL);
6642 void mgmt_reenable_advertising(struct hci_dev *hdev)
6644 struct hci_request req;
6646 if (hci_conn_num(hdev, LE_LINK) > 0)
6649 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6652 hci_req_init(&req, hdev);
6653 enable_advertising(&req);
6655 /* If this fails we have no option but to let user space know
6656 * that we've disabled advertising.
6658 if (hci_req_run(&req, adv_enable_complete) < 0) {
6659 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
6660 new_settings(hdev, NULL);