Bluetooth: Add helper for serialized HCI command execution
[linux-2.6-block.git] / net / bluetooth / hci_sync.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  */
7
8 #include <net/bluetooth/bluetooth.h>
9 #include <net/bluetooth/hci_core.h>
10 #include <net/bluetooth/mgmt.h>
11
12 #include "hci_request.h"
13 #include "smp.h"
14
15 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
16                                   struct sk_buff *skb)
17 {
18         bt_dev_dbg(hdev, "result 0x%2.2x", result);
19
20         if (hdev->req_status != HCI_REQ_PEND)
21                 return;
22
23         hdev->req_result = result;
24         hdev->req_status = HCI_REQ_DONE;
25
26         wake_up_interruptible(&hdev->req_wait_q);
27 }
28
29 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
30                                           u32 plen, const void *param,
31                                           struct sock *sk)
32 {
33         int len = HCI_COMMAND_HDR_SIZE + plen;
34         struct hci_command_hdr *hdr;
35         struct sk_buff *skb;
36
37         skb = bt_skb_alloc(len, GFP_ATOMIC);
38         if (!skb)
39                 return NULL;
40
41         hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
42         hdr->opcode = cpu_to_le16(opcode);
43         hdr->plen   = plen;
44
45         if (plen)
46                 skb_put_data(skb, param, plen);
47
48         bt_dev_dbg(hdev, "skb len %d", skb->len);
49
50         hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
51         hci_skb_opcode(skb) = opcode;
52
53         return skb;
54 }
55
56 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
57                              const void *param, u8 event, struct sock *sk)
58 {
59         struct hci_dev *hdev = req->hdev;
60         struct sk_buff *skb;
61
62         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
63
64         /* If an error occurred during request building, there is no point in
65          * queueing the HCI command. We can simply return.
66          */
67         if (req->err)
68                 return;
69
70         skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
71         if (!skb) {
72                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
73                            opcode);
74                 req->err = -ENOMEM;
75                 return;
76         }
77
78         if (skb_queue_empty(&req->cmd_q))
79                 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
80
81         bt_cb(skb)->hci.req_event = event;
82
83         skb_queue_tail(&req->cmd_q, skb);
84 }
85
86 static int hci_cmd_sync_run(struct hci_request *req)
87 {
88         struct hci_dev *hdev = req->hdev;
89         struct sk_buff *skb;
90         unsigned long flags;
91
92         bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
93
94         /* If an error occurred during request building, remove all HCI
95          * commands queued on the HCI request queue.
96          */
97         if (req->err) {
98                 skb_queue_purge(&req->cmd_q);
99                 return req->err;
100         }
101
102         /* Do not allow empty requests */
103         if (skb_queue_empty(&req->cmd_q))
104                 return -ENODATA;
105
106         skb = skb_peek_tail(&req->cmd_q);
107         bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
108         bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
109
110         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
111         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
112         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
113
114         queue_work(hdev->workqueue, &hdev->cmd_work);
115
116         return 0;
117 }
118
/* This function requires the caller holds hdev->req_lock. */
/* Send a single HCI command and block until it completes, an optional
 * specific event arrives, a signal interrupts the wait, or the timeout
 * (in jiffies) expires. Returns the response skb (caller must free it
 * with kfree_skb()) or an ERR_PTR() on failure.
 */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "");

	hci_req_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	/* Mark the request pending before submitting, so the completion
	 * callback sees HCI_REQ_PEND when it fires.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_cmd_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until the completion callback flips req_status or the
	 * timeout expires.
	 */
	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	/* Interrupted by a signal: report -EINTR to the caller */
	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno on cancellation */
		err = -hdev->req_result;
		break;

	default:
		/* Still pending after the wait: it timed out */
		err = -ETIMEDOUT;
		break;
	}

	/* Reset the request state and claim the response skb.
	 * NOTE(review): nothing in this file assigns hdev->req_skb —
	 * presumably the event path's completion handler populates it;
	 * confirm, otherwise this always returns -ENODATA.
	 */
	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);
179
/* This function requires the caller holds hdev->req_lock. */
/* Send an HCI command synchronously without binding it to a specific
 * event or socket; thin wrapper around __hci_cmd_sync_sk().
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);
187
188 /* Send HCI command and wait for command complete event */
189 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
190                              const void *param, u32 timeout)
191 {
192         struct sk_buff *skb;
193
194         if (!test_bit(HCI_UP, &hdev->flags))
195                 return ERR_PTR(-ENETDOWN);
196
197         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
198
199         hci_req_sync_lock(hdev);
200         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
201         hci_req_sync_unlock(hdev);
202
203         return skb;
204 }
205 EXPORT_SYMBOL(hci_cmd_sync);
206
/* This function requires the caller holds hdev->req_lock. */
/* Like __hci_cmd_sync() but completes on the given event instead of
 * the default command complete/status event.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
215
216 /* This function requires the caller holds hdev->req_lock. */
217 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
218                              const void *param, u8 event, u32 timeout,
219                              struct sock *sk)
220 {
221         struct sk_buff *skb;
222         u8 status;
223
224         skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
225         if (IS_ERR_OR_NULL(skb)) {
226                 bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
227                            PTR_ERR(skb));
228                 return PTR_ERR(skb);
229         }
230
231         status = skb->data[0];
232
233         kfree_skb(skb);
234
235         return status;
236 }
237 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
238
/* Convenience wrapper: send a command synchronously and return only
 * its status byte; see __hci_cmd_sync_status_sk().
 */
int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);
246
247 static void hci_cmd_sync_work(struct work_struct *work)
248 {
249         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
250         struct hci_cmd_sync_work_entry *entry;
251         hci_cmd_sync_work_func_t func;
252         hci_cmd_sync_work_destroy_t destroy;
253         void *data;
254
255         bt_dev_dbg(hdev, "");
256
257         mutex_lock(&hdev->cmd_sync_work_lock);
258         entry = list_first_entry(&hdev->cmd_sync_work_list,
259                                  struct hci_cmd_sync_work_entry, list);
260         if (entry) {
261                 list_del(&entry->list);
262                 func = entry->func;
263                 data = entry->data;
264                 destroy = entry->destroy;
265                 kfree(entry);
266         } else {
267                 func = NULL;
268                 data = NULL;
269                 destroy = NULL;
270         }
271         mutex_unlock(&hdev->cmd_sync_work_lock);
272
273         if (func) {
274                 int err;
275
276                 hci_req_sync_lock(hdev);
277
278                 err = func(hdev, data);
279
280                 if (destroy)
281                         destroy(hdev, data, err);
282
283                 hci_req_sync_unlock(hdev);
284         }
285 }
286
287 void hci_cmd_sync_init(struct hci_dev *hdev)
288 {
289         INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
290         INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
291         mutex_init(&hdev->cmd_sync_work_lock);
292 }
293
294 void hci_cmd_sync_clear(struct hci_dev *hdev)
295 {
296         struct hci_cmd_sync_work_entry *entry, *tmp;
297
298         cancel_work_sync(&hdev->cmd_sync_work);
299
300         list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
301                 if (entry->destroy)
302                         entry->destroy(hdev, entry->data, -ECANCELED);
303
304                 list_del(&entry->list);
305                 kfree(entry);
306         }
307 }
308
309 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
310                        void *data, hci_cmd_sync_work_destroy_t destroy)
311 {
312         struct hci_cmd_sync_work_entry *entry;
313
314         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
315         if (!entry)
316                 return -ENOMEM;
317
318         entry->func = func;
319         entry->data = data;
320         entry->destroy = destroy;
321
322         mutex_lock(&hdev->cmd_sync_work_lock);
323         list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
324         mutex_unlock(&hdev->cmd_sync_work_lock);
325
326         queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
327
328         return 0;
329 }
330 EXPORT_SYMBOL(hci_cmd_sync_queue);