1 // SPDX-License-Identifier: GPL-2.0
/*
3 * BlueZ - Bluetooth protocol stack for Linux
5 * Copyright (C) 2021 Intel Corporation
*/
8 #include <net/bluetooth/bluetooth.h>
9 #include <net/bluetooth/hci_core.h>
10 #include <net/bluetooth/mgmt.h>
12 #include "hci_request.h"
/*
 * Completion callback for synchronous HCI requests: record the command
 * result on hdev and wake the thread sleeping on hdev->req_wait_q in
 * __hci_cmd_sync_sk() below.
 *
 * NOTE(review): this excerpt appears truncated (parameter list end, braces
 * and an early return are not visible) — verify against the full file.
 */
15 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
18 bt_dev_dbg(hdev, "result 0x%2.2x", result);
/* Only deliver a result while a synchronous request is actually pending. */
20 if (hdev->req_status != HCI_REQ_PEND)
23 hdev->req_result = result;
24 hdev->req_status = HCI_REQ_DONE;
/* Wake the waiter blocked in __hci_cmd_sync_sk(). */
26 wake_up_interruptible(&hdev->req_wait_q);
/*
 * Allocate and fill an skb carrying one HCI command: an hci_command_hdr
 * (opcode stored little-endian) followed by plen bytes copied from param,
 * with packet type HCI_COMMAND_PKT and the opcode cached in the skb cb.
 *
 * NOTE(review): excerpt appears truncated (allocation-failure handling,
 * the return statement and braces are not visible) — confirm in full file.
 */
29 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
30 u32 plen, const void *param,
33 int len = HCI_COMMAND_HDR_SIZE + plen;
34 struct hci_command_hdr *hdr;
/* GFP_ATOMIC — presumably callable from atomic context; TODO confirm. */
37 skb = bt_skb_alloc(len, GFP_ATOMIC);
41 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* HCI opcodes are little-endian on the wire. */
42 hdr->opcode = cpu_to_le16(opcode);
46 skb_put_data(skb, param, plen);
48 bt_dev_dbg(hdev, "skb len %d", skb->len);
50 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
51 hci_skb_opcode(skb) = opcode;
/*
 * Build an HCI command skb via hci_cmd_sync_alloc() and append it to the
 * request's command queue. The first command queued on a request gets the
 * HCI_REQ_START flag.
 *
 * NOTE(review): excerpt appears truncated (error-path bookkeeping, braces
 * and some statements are not visible) — verify against the full file.
 */
56 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
57 const void *param, u8 event, struct sock *sk)
59 struct hci_dev *hdev = req->hdev;
62 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
64 /* If an error occurred during request building, there is no point in
65 * queueing the HCI command. We can simply return.
70 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
72 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
/* Mark the first command so the core knows where this request starts. */
78 if (skb_queue_empty(&req->cmd_q))
79 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
/* Remember which event (if any) is expected to complete this command. */
81 bt_cb(skb)->hci.req_event = event;
83 skb_queue_tail(&req->cmd_q, skb);
/*
 * Submit the request's queued commands to the controller: attach the sync
 * completion callback to the last skb, splice the queue onto hdev->cmd_q
 * under its lock, and kick the device command work.
 *
 * NOTE(review): excerpt appears truncated (the error check guarding the
 * purge, return statements and braces are not visible) — verify in the
 * full file; as shown, the purge would run unconditionally.
 */
86 static int hci_cmd_sync_run(struct hci_request *req)
88 struct hci_dev *hdev = req->hdev;
92 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
94 /* If an error occurred during request building, remove all HCI
95 * commands queued on the HCI request queue.
98 skb_queue_purge(&req->cmd_q);
102 /* Do not allow empty requests */
103 if (skb_queue_empty(&req->cmd_q))
/* Only the last command of the request gets the completion callback. */
106 skb = skb_peek_tail(&req->cmd_q);
107 bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
108 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
/* Append atomically to the device command queue. */
110 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
111 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
112 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
114 queue_work(hdev->workqueue, &hdev->cmd_work);
119 /* This function requires the caller holds hdev->req_lock. */
/*
 * Send a single HCI command and sleep until it completes, times out or the
 * wait is interrupted. On success the visible tail suggests the response
 * skb is returned; a missing response yields ERR_PTR(-ENODATA) and a
 * signal yields ERR_PTR(-EINTR).
 *
 * NOTE(review): excerpt appears truncated (several statements, the pickup
 * of hdev->req_skb, case labels and braces are missing) — verify in the
 * full file.
 */
120 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
121 const void *param, u8 event, u32 timeout,
124 struct hci_request req;
128 bt_dev_dbg(hdev, "");
130 hci_req_init(&req, hdev);
132 hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
/* Must be PEND before running so hci_cmd_sync_complete() can flip it. */
134 hdev->req_status = HCI_REQ_PEND;
136 err = hci_cmd_sync_run(&req);
/* Sleep until the completion fires, the timeout expires or a signal
 * arrives. */
140 err = wait_event_interruptible_timeout(hdev->req_wait_q,
141 hdev->req_status != HCI_REQ_PEND,
/* Interrupted by a signal: report -EINTR to the caller. */
144 if (err == -ERESTARTSYS)
145 return ERR_PTR(-EINTR);
147 switch (hdev->req_status) {
/* Controller returned a status — translate it to a negative errno. */
149 err = -bt_to_errno(hdev->req_result);
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
/* Reset the per-device sync-request state for the next caller. */
161 hdev->req_status = 0;
162 hdev->req_result = 0;
164 hdev->req_skb = NULL;
166 bt_dev_dbg(hdev, "end: err %d", err);
/* No response skb was captured for this command. */
174 return ERR_PTR(-ENODATA);
178 EXPORT_SYMBOL(__hci_cmd_sync_sk);
180 /* This function requires the caller holds hdev->req_lock. */
/*
 * Convenience wrapper around __hci_cmd_sync_sk() with no specific
 * completion event (0) and no socket (NULL).
 */
181 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
182 const void *param, u32 timeout)
184 return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
186 EXPORT_SYMBOL(__hci_cmd_sync);
188 /* Send HCI command and wait for command complete event */
/*
 * Locked variant for external callers: refuses when the device is not up
 * and takes the request sync lock around __hci_cmd_sync().
 *
 * NOTE(review): excerpt appears truncated (local declarations, the final
 * return and braces are not visible) — verify against the full file.
 */
189 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
190 const void *param, u32 timeout)
/* Sending commands to a device that is not HCI_UP makes no sense. */
194 if (!test_bit(HCI_UP, &hdev->flags))
195 return ERR_PTR(-ENETDOWN);
197 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
/* Serialize against other synchronous requests on this device. */
199 hci_req_sync_lock(hdev);
200 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
201 hci_req_sync_unlock(hdev);
205 EXPORT_SYMBOL(hci_cmd_sync);
207 /* This function requires the caller holds hdev->req_lock. */
/* Wrapper that waits for a specific completion event; no socket.
 * NOTE(review): the trailing NULL argument/closing paren is not visible
 * in this excerpt — presumably sk = NULL; verify in the full file. */
208 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
209 const void *param, u8 event, u32 timeout)
211 return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
214 EXPORT_SYMBOL(__hci_cmd_sync_ev);
216 /* This function requires the caller holds hdev->req_lock. */
/*
 * Like __hci_cmd_sync_sk() but returns the HCI status (first byte of the
 * response) as an int instead of the skb.
 *
 * NOTE(review): excerpt appears truncated (the error-path return, the skb
 * release and the final return are not visible) — verify in the full file.
 */
217 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
218 const void *param, u8 event, u32 timeout,
224 skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
225 if (IS_ERR_OR_NULL(skb)) {
226 bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
/* First byte of the response carries the HCI status code. */
231 status = skb->data[0];
237 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
/* Status-returning wrapper with no specific event; requires hdev->req_lock.
 * NOTE(review): the trailing NULL argument/closing paren is not visible
 * in this excerpt — presumably sk = NULL; verify in the full file. */
239 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
240 const void *param, u32 timeout)
242 return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
245 EXPORT_SYMBOL(__hci_cmd_sync_status);
/*
 * Workqueue handler draining hdev->cmd_sync_work_list: detach the first
 * entry under cmd_sync_work_lock, then run its func under the request
 * sync lock and hand the result to its destroy callback.
 *
 * NOTE(review): excerpt appears truncated (NULL-check on entry, the
 * data/func copies, a destroy guard and braces are not visible) — verify
 * against the full file.
 */
247 static void hci_cmd_sync_work(struct work_struct *work)
249 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
250 struct hci_cmd_sync_work_entry *entry;
251 hci_cmd_sync_work_func_t func;
252 hci_cmd_sync_work_destroy_t destroy;
255 bt_dev_dbg(hdev, "");
/* Detach the first pending entry under the list lock. */
257 mutex_lock(&hdev->cmd_sync_work_lock);
258 entry = list_first_entry(&hdev->cmd_sync_work_list,
259 struct hci_cmd_sync_work_entry, list);
261 list_del(&entry->list);
264 destroy = entry->destroy;
271 mutex_unlock(&hdev->cmd_sync_work_lock);
/* Run the queued callback with exclusive synchronous-request access. */
276 hci_req_sync_lock(hdev);
278 err = func(hdev, data);
/* Let the submitter clean up its context, passing the result. */
281 destroy(hdev, data, err);
283 hci_req_sync_unlock(hdev);
/* Initialise the per-device command-sync machinery: work item, pending
 * list and the mutex protecting that list. */
287 void hci_cmd_sync_init(struct hci_dev *hdev)
289 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
290 INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
291 mutex_init(&hdev->cmd_sync_work_lock);
/*
 * Tear down the command-sync machinery: stop the work item and dispose of
 * any still-queued entries, signalling -ECANCELED to their destroy
 * callbacks.
 *
 * NOTE(review): excerpt appears truncated (a destroy NULL-check, the
 * entry kfree and braces are not visible) — verify against the full file.
 */
294 void hci_cmd_sync_clear(struct hci_dev *hdev)
296 struct hci_cmd_sync_work_entry *entry, *tmp;
/* Ensure the worker is no longer running before walking the list. */
298 cancel_work_sync(&hdev->cmd_sync_work);
300 list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
302 entry->destroy(hdev, entry->data, -ECANCELED);
304 list_del(&entry->list);
/*
 * Queue a func/data/destroy triple for deferred execution by
 * hci_cmd_sync_work() on the request workqueue.
 *
 * NOTE(review): excerpt appears truncated (the kmalloc NULL-check, the
 * func/data assignments and the return value are not visible) — verify
 * against the full file.
 */
309 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
310 void *data, hci_cmd_sync_work_destroy_t destroy)
312 struct hci_cmd_sync_work_entry *entry;
314 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
320 entry->destroy = destroy;
/* Publish the entry under the list lock, then kick the worker. */
322 mutex_lock(&hdev->cmd_sync_work_lock);
323 list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
324 mutex_unlock(&hdev->cmd_sync_work_lock);
326 queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
330 EXPORT_SYMBOL(hci_cmd_sync_queue);