// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 MediaTek Inc.

/*
 * Bluetooth support for MediaTek serial devices
 *
 * Author: Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "h4_recv.h"

#define VERSION "0.2"

#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"

#define MTK_STP_TLR_SIZE 2

#define BTMTKUART_TX_STATE_ACTIVE 1
#define BTMTKUART_TX_STATE_WAKEUP 2
#define BTMTKUART_TX_WAIT_VND_EVT 3
#define BTMTKUART_REQUIRED_WAKEUP 4

#define BTMTKUART_FLAG_STANDALONE_HW BIT(0)

enum {
	MTK_WMT_PATCH_DWNLD = 0x1,
	MTK_WMT_TEST = 0x2,
	MTK_WMT_WAKEUP = 0x3,
	MTK_WMT_HIF = 0x4,
	MTK_WMT_FUNC_CTRL = 0x6,
	MTK_WMT_RST = 0x7,
	MTK_WMT_SEMAPHORE = 0x17,
};

enum {
	BTMTK_WMT_INVALID,
	BTMTK_WMT_PATCH_UNDONE,
	BTMTK_WMT_PATCH_DONE,
	BTMTK_WMT_ON_UNDONE,
	BTMTK_WMT_ON_DONE,
	BTMTK_WMT_ON_PROGRESS,
};

struct mtk_stp_hdr {
	u8 prefix;
	__be16 dlen;
	u8 cs;
} __packed;

struct btmtkuart_data {
	unsigned int flags;
	const char *fwname;
};

struct mtk_wmt_hdr {
	u8 dir;
	u8 op;
	__le16 dlen;
	u8 flag;
} __packed;

struct mtk_hci_wmt_cmd {
	struct mtk_wmt_hdr hdr;
	u8 data[256];
} __packed;

struct btmtk_hci_wmt_evt {
	struct hci_event_hdr hhdr;
	struct mtk_wmt_hdr whdr;
} __packed;

struct btmtk_hci_wmt_evt_funcc {
	struct btmtk_hci_wmt_evt hwhdr;
	__be16 status;
} __packed;

struct btmtk_tci_sleep {
	u8 mode;
	__le16 duration;
	__le16 host_duration;
	u8 host_wakeup_pin;
	u8 time_compensation;
} __packed;

struct btmtk_hci_wmt_params {
	u8 op;
	u8 flag;
	u16 dlen;
	const void *data;
	u32 *status;
};

struct btmtkuart_dev {
	struct hci_dev *hdev;
	struct serdev_device *serdev;
	struct clk *clk;

	struct regulator *vcc;
	struct gpio_desc *reset;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_runtime;
	struct pinctrl_state *pins_boot;
	speed_t desired_speed;
	speed_t curr_speed;

	struct work_struct tx_work;
	unsigned long tx_state;
	struct sk_buff_head txq;

	struct sk_buff *rx_skb;
	struct sk_buff *evt_skb;

	u8 stp_pad[6];
	u8 stp_cursor;
	u16 stp_dlen;

	const struct btmtkuart_data *data;
};

#define btmtkuart_is_standalone(bdev)	\
	((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)
#define btmtkuart_is_builtin_soc(bdev)	\
	!((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW)

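/* Build a MediaTek WMT packet, send it as vendor command 0xfc6f and wait for
 * the matching vendor event. The event is stashed in bdev->evt_skb by the
 * event receive path and is parsed here to report an optional status back
 * through wmt_params->status.
 */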
static int mtk_hci_wmt_sync(struct hci_dev *hdev,
			    struct btmtk_hci_wmt_params *wmt_params)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct mtk_hci_wmt_cmd wc;
	struct mtk_wmt_hdr *hdr;
	int err;

	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	hdr = (struct mtk_wmt_hdr *)&wc;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc.data, wmt_params->data, wmt_params->dlen);

	set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
	if (err < 0) {
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and with that indicate completion of the
	 * WMT command.
	 */
	err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT,
				  TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
	if (err == -EINTR) {
		bt_dev_err(hdev, "Execution of wmt command interrupted");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return err;
	}

	if (err) {
		bt_dev_err(hdev, "Execution of wmt command timed out");
		clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
		return -ETIMEDOUT;
	}

	/* Parse and handle the return WMT event */
	wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
	if (wmt_evt->whdr.op != hdr->op) {
		bt_dev_err(hdev, "Wrong op received %d expected %d",
			   wmt_evt->whdr.op, hdr->op);
		err = -EIO;
		goto err_free_skb;
	}

	switch (wmt_evt->whdr.op) {
	case MTK_WMT_SEMAPHORE:
		if (wmt_evt->whdr.flag == 2)
			status = BTMTK_WMT_PATCH_UNDONE;
		else
			status = BTMTK_WMT_PATCH_DONE;
		break;
	case MTK_WMT_FUNC_CTRL:
		wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
		if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
			status = BTMTK_WMT_ON_DONE;
		else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
			status = BTMTK_WMT_ON_PROGRESS;
		else
			status = BTMTK_WMT_ON_UNDONE;
		break;
	}

	if (wmt_params->status)
		*wmt_params->status = status;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

	return err;
}

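/* Download the firmware patch to the controller. The 30-byte patch header is
 * skipped and the remaining image is streamed in chunks of up to 250 bytes
 * with MTK_WMT_PATCH_DWNLD, followed by MTK_WMT_RST to activate the patch.
 */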
static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
{
	struct btmtk_hci_wmt_params wmt_params;
	const struct firmware *fw;
	const u8 *fw_ptr;
	size_t fw_size;
	int err, dlen;
	u8 flag;

	err = request_firmware(&fw, fwname, &hdev->dev);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
		return err;
	}

	fw_ptr = fw->data;
	fw_size = fw->size;

	/* The patch header is 30 bytes and should be skipped */
	if (fw_size < 30) {
		err = -EINVAL;
		goto free_fw;
	}

	fw_size -= 30;
	fw_ptr += 30;
	flag = 1;

	wmt_params.op = MTK_WMT_PATCH_DWNLD;
	wmt_params.status = NULL;

	while (fw_size > 0) {
		dlen = min_t(int, 250, fw_size);

		/* Tell the device the position in the sequence */
		if (fw_size - dlen <= 0)
			flag = 3;
		else if (fw_size < fw->size - 30)
			flag = 2;

		wmt_params.flag = flag;
		wmt_params.dlen = dlen;
		wmt_params.data = fw_ptr;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
				   err);
			goto free_fw;
		}

		fw_size -= dlen;
		fw_ptr += dlen;
	}

	wmt_params.op = MTK_WMT_RST;
	wmt_params.flag = 4;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	/* Activate the function the firmware provides */
	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
		goto free_fw;
	}

	/* Wait a few moments for the firmware activation to complete */
	usleep_range(10000, 12000);

free_fw:
	release_firmware(fw);
	return err;
}

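/* Event receive path: remap the MediaTek vendor event id, keep a clone
 * around for a pending WMT wait and pass the event on to the HCI core.
 */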
static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct hci_event_hdr *hdr = (void *)skb->data;
	int err;

	/* Fix up the vendor event id with 0xff for vendor specific instead
	 * of 0xe4 so that events sent via the monitoring socket can be
	 * parsed properly.
	 */
	if (hdr->evt == 0xe4)
		hdr->evt = HCI_EV_VENDOR;

	/* When someone waits for the WMT event, the skb is cloned and the
	 * event is then processed from the clone.
	 */
	if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) {
		bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
		if (!bdev->evt_skb) {
			err = -ENOMEM;
			goto err_out;
		}
	}

	err = hci_recv_frame(hdev, skb);
	if (err < 0)
		goto err_free_skb;

	if (hdr->evt == HCI_EV_VENDOR) {
		if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT,
				       &bdev->tx_state)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT);
		}
	}

	return 0;

err_free_skb:
	kfree_skb(bdev->evt_skb);
	bdev->evt_skb = NULL;

err_out:
	return err;
}

static const struct h4_recv_pkt mtk_recv_pkts[] = {
	{ H4_RECV_ACL, .recv = hci_recv_frame },
	{ H4_RECV_SCO, .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = btmtkuart_recv_event },
};

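/* TX worker: drain the queued skbs into the serdev. A partially written skb
 * is re-queued at the head and transmission continues on the next
 * write_wakeup from the serdev core.
 */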
static void btmtkuart_tx_work(struct work_struct *work)
{
	struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev,
						  tx_work);
	struct serdev_device *serdev = bdev->serdev;
	struct hci_dev *hdev = bdev->hdev;

	while (1) {
		clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

		while (1) {
			struct sk_buff *skb = skb_dequeue(&bdev->txq);
			int len;

			if (!skb)
				break;

			len = serdev_device_write_buf(serdev, skb->data,
						      skb->len);
			hdev->stat.byte_tx += len;

			skb_pull(skb, len);
			if (skb->len > 0) {
				skb_queue_head(&bdev->txq, skb);
				break;
			}

			switch (hci_skb_pkt_type(skb)) {
			case HCI_COMMAND_PKT:
				hdev->stat.cmd_tx++;
				break;
			case HCI_ACLDATA_PKT:
				hdev->stat.acl_tx++;
				break;
			case HCI_SCODATA_PKT:
				hdev->stat.sco_tx++;
				break;
			}

			kfree_skb(skb);
		}

		if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state))
			break;
	}

	clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state);
}

static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev)
{
	if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state))
		set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state);

	schedule_work(&bdev->tx_work);
}

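/* Strip the STP framing from the incoming byte stream. Returns a pointer to
 * the H:4 payload and reports in sz_h4 how many bytes belong to the current
 * STP packet, or NULL when no payload is available yet.
 */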
static const unsigned char *
mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count,
	      int *sz_h4)
{
	struct mtk_stp_hdr *shdr;

	/* The cursor is reset when all the STP data has been consumed */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6)
		bdev->stp_cursor = 0;

	/* Fill the pad until all the STP info is obtained */
	while (bdev->stp_cursor < 6 && count > 0) {
		bdev->stp_pad[bdev->stp_cursor] = *data;
		bdev->stp_cursor++;
		data++;
		count--;
	}

	/* Retrieve the STP info and have a sanity check */
	if (!bdev->stp_dlen && bdev->stp_cursor >= 6) {
		shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2];
		bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff;

		/* Resync STP when unexpected data is being read */
		if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) {
			bt_dev_err(bdev->hdev, "stp format unexpected (%d, %d)",
				   shdr->prefix, bdev->stp_dlen);
			bdev->stp_cursor = 2;
			bdev->stp_dlen = 0;
		}
	}

	/* Quit directly when there is no data left for H4 to process */
	if (count <= 0)
		return NULL;

	/* Translate to the amount of data H4 can handle so far */
	*sz_h4 = min_t(int, count, bdev->stp_dlen);

	/* Update the remaining size of the STP packet */
	bdev->stp_dlen -= *sz_h4;

	/* Data points to the STP payload which can be handled by H4 */
	return data;
}

static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	const unsigned char *p_left = data, *p_h4;
	int sz_left = count, sz_h4, adv;
	int err;

	while (sz_left > 0) {
		/* The serial data received from the MT7622 BT controller is
		 * always framed with an STP header and trailer.
		 *
		 * A full STP packet looks like
		 * -----------------------------------
		 * | STP header |  H:4  | STP trailer |
		 * -----------------------------------
		 * but it is not guaranteed to contain a full H:4 packet,
		 * which means multiple STP packets may be needed to form one
		 * H:4 packet; an STP header and its length don't indicate a
		 * full H:4 frame, things can fragment. The length recorded
		 * in the STP header only tells how much data the H:4 engine
		 * can handle at the moment.
		 */

		p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4);
		if (!p_h4)
			break;

		adv = p_h4 - p_left;
		sz_left -= adv;
		p_left += adv;

		bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
					   sz_h4, mtk_recv_pkts,
					   ARRAY_SIZE(mtk_recv_pkts));
		if (IS_ERR(bdev->rx_skb)) {
			err = PTR_ERR(bdev->rx_skb);
			bt_dev_err(bdev->hdev,
				   "Frame reassembly failed (%d)", err);
			bdev->rx_skb = NULL;
			return err;
		}

		sz_left -= sz_h4;
		p_left += sz_h4;
	}

	return 0;
}

static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data,
				 size_t count)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	int err;

	err = btmtkuart_recv(bdev->hdev, data, count);
	if (err < 0)
		return err;

	bdev->hdev->stat.byte_rx += count;

	return count;
}

static void btmtkuart_write_wakeup(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);

	btmtkuart_tx_wakeup(bdev);
}

static const struct serdev_device_ops btmtkuart_client_ops = {
	.receive_buf = btmtkuart_receive_buf,
	.write_wakeup = btmtkuart_write_wakeup,
};

static int btmtkuart_open(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev;
	int err;

	err = serdev_device_open(bdev->serdev);
	if (err) {
		bt_dev_err(hdev, "Unable to open UART device %s",
			   dev_name(&bdev->serdev->dev));
		goto err_open;
	}

	if (btmtkuart_is_standalone(bdev)) {
		if (bdev->curr_speed != bdev->desired_speed)
			err = serdev_device_set_baudrate(bdev->serdev,
							 115200);
		else
			err = serdev_device_set_baudrate(bdev->serdev,
							 bdev->desired_speed);

		if (err < 0) {
			bt_dev_err(hdev, "Unable to set baudrate UART device %s",
				   dev_name(&bdev->serdev->dev));
			goto err_serdev_close;
		}

		serdev_device_set_flow_control(bdev->serdev, false);
	}

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	dev = &bdev->serdev->dev;

	/* Enable the power domain and clock the device requires */
	pm_runtime_enable(dev);
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		goto err_disable_rpm;
	}

	err = clk_prepare_enable(bdev->clk);
	if (err < 0)
		goto err_put_rpm;

	return 0;

err_put_rpm:
	pm_runtime_put_sync(dev);
err_disable_rpm:
	pm_runtime_disable(dev);
err_serdev_close:
	serdev_device_close(bdev->serdev);
err_open:
	return err;
}

static int btmtkuart_close(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct device *dev = &bdev->serdev->dev;

	/* Shut down the clock and power domain the device requires */
	clk_disable_unprepare(bdev->clk);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	serdev_device_close(bdev->serdev);

	return 0;
}

static int btmtkuart_flush(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);

	/* Flush any pending characters */
	serdev_device_write_flush(bdev->serdev);
	skb_queue_purge(&bdev->txq);

	cancel_work_sync(&bdev->tx_work);

	kfree_skb(bdev->rx_skb);
	bdev->rx_skb = NULL;

	bdev->stp_cursor = 2;
	bdev->stp_dlen = 0;

	return 0;
}

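/* Ask the controller whether the Bluetooth function is already enabled and
 * return the BTMTK_WMT_ON_* status reported by the WMT event.
 */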
static int btmtkuart_func_query(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	int status, err;
	u8 param = 0;

	/* Query whether the function is enabled */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 4;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query function status (%d)", err);
		return err;
	}

	return status;
}

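/* Move both the controller and the host UART to the desired speed: announce
 * the new baudrate with MTK_WMT_HIF, reprogram the host side, send a dummy
 * 0xff byte and verify the link with MTK_WMT_TEST.
 */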
static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	u32 baudrate;
	u8 param;
	int err;

	/* Tell the device to enter the probe state: the host is ready to
	 * change to a new baudrate.
	 */
	baudrate = cpu_to_le32(bdev->desired_speed);
	wmt_params.op = MTK_WMT_HIF;
	wmt_params.flag = 1;
	wmt_params.dlen = 4;
	wmt_params.data = &baudrate;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to set device baudrate (%d)", err);
		return err;
	}

	err = serdev_device_set_baudrate(bdev->serdev,
					 bdev->desired_speed);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to set up host baudrate (%d)",
			   err);
		return err;
	}

	serdev_device_set_flow_control(bdev->serdev, false);

	/* Send a dummy byte 0xff to activate the new baudrate */
	param = 0xff;
	err = serdev_device_write(bdev->serdev, &param, sizeof(param),
				  MAX_SCHEDULE_TIMEOUT);
	if (err < 0 || err < sizeof(param))
		return err;

	serdev_device_wait_until_sent(bdev->serdev, 0);

	/* Wait some time for the device to finish changing the baudrate */
	usleep_range(20000, 22000);

	/* Test the new baudrate */
	wmt_params.op = MTK_WMT_TEST;
	wmt_params.flag = 7;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to test new baudrate (%d)",
			   err);
		return err;
	}

	bdev->curr_speed = bdev->desired_speed;

	return 0;
}

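/* One-time device setup: wake up the MCUSYS when required, switch the
 * baudrate on standalone parts, download the firmware patch unless it is
 * already present, enable the Bluetooth function and apply the low power
 * settings.
 */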
static int btmtkuart_setup(struct hci_dev *hdev)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	int err, status;
	u8 param = 0x1;

	calltime = ktime_get();

	/* Waking up the MCUSYS is required for certain devices before we
	 * start any setup.
	 */
	if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) {
		wmt_params.op = MTK_WMT_WAKEUP;
		wmt_params.flag = 3;
		wmt_params.dlen = 0;
		wmt_params.data = NULL;
		wmt_params.status = NULL;

		err = mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err);
			return err;
		}

		clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
	}

	if (btmtkuart_is_standalone(bdev))
		btmtkuart_change_baudrate(hdev);

	/* Query whether the firmware is already downloaded */
	wmt_params.op = MTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev, "Firmware already downloaded");
		goto ignore_setup_fw;
	}

	/* Set up the firmware which the device definitely requires */
	err = mtk_setup_firmware(hdev, bdev->data->fwname);
	if (err < 0)
		return err;

ignore_setup_fw:
	/* Query whether the device is already enabled */
	err = readx_poll_timeout(btmtkuart_func_query, hdev, status,
				 status < 0 || status != BTMTK_WMT_ON_PROGRESS,
				 2000, 5000000);
	/* -ETIMEDOUT happens */
	if (err < 0)
		return err;

	/* The other errors happen in btmtkuart_func_query */
	if (status < 0)
		return status;

	if (status == BTMTK_WMT_ON_DONE) {
		bt_dev_info(hdev, "function already on");
		goto ignore_func_on;
	}

	/* Enable Bluetooth protocol */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

ignore_func_on:
	/* Apply the low power environment setup */
	tci_sleep.mode = 0x5;
	tci_sleep.duration = cpu_to_le16(0x640);
	tci_sleep.host_duration = cpu_to_le16(0x640);
	tci_sleep.host_wakeup_pin = 0;
	tci_sleep.time_compensation = 0;

	skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
		return err;
	}
	kfree_skb(skb);

	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;

	bt_dev_info(hdev, "Device setup in %llu usecs", duration);

	return 0;
}

static int btmtkuart_shutdown(struct hci_dev *hdev)
{
	struct btmtk_hci_wmt_params wmt_params;
	u8 param = 0x0;
	int err;

	/* Disable the device */
	wmt_params.op = MTK_WMT_FUNC_CTRL;
	wmt_params.flag = 0;
	wmt_params.dlen = sizeof(param);
	wmt_params.data = &param;
	wmt_params.status = NULL;

	err = mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
		return err;
	}

	return 0;
}

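/* Wrap an outgoing H:4 frame with the STP header and trailer and queue it
 * for the TX worker.
 */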
static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
	struct mtk_stp_hdr *shdr;
	int err, dlen, type = 0;

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);

	/* Make sure there is enough room for the STP header and trailer */
	if (unlikely(skb_headroom(skb) < sizeof(*shdr)) ||
	    (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) {
		err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE,
				       GFP_ATOMIC);
		if (err < 0)
			return err;
	}

	/* Add the STP header */
	dlen = skb->len;
	shdr = skb_push(skb, sizeof(*shdr));
	shdr->prefix = 0x80;
	shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12));
	shdr->cs = 0;	/* MT7622 doesn't care about checksum value */

	/* Add the STP trailer */
	skb_put_zero(skb, MTK_STP_TLR_SIZE);

	skb_queue_tail(&bdev->txq, skb);

	btmtkuart_tx_wakeup(bdev);
	return 0;
}

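/* Parse the device tree resources: standalone parts use a vcc regulator,
 * pinctrl states, an optional reset GPIO and an optional "current-speed"
 * property, while SoC built-in parts only need the "ref" clock.
 */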
static int btmtkuart_parse_dt(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct device_node *node = serdev->dev.of_node;
	u32 speed = 921600;
	int err;

	if (btmtkuart_is_standalone(bdev)) {
		of_property_read_u32(node, "current-speed", &speed);

		bdev->desired_speed = speed;

		bdev->vcc = devm_regulator_get(&serdev->dev, "vcc");
		if (IS_ERR(bdev->vcc)) {
			err = PTR_ERR(bdev->vcc);
			return err;
		}

		bdev->pinctrl = devm_pinctrl_get(&serdev->dev);
		if (IS_ERR(bdev->pinctrl)) {
			err = PTR_ERR(bdev->pinctrl);
			return err;
		}

		bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl,
						       "default");
		if (IS_ERR(bdev->pins_boot)) {
			err = PTR_ERR(bdev->pins_boot);
			return err;
		}

		bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl,
							  "runtime");
		if (IS_ERR(bdev->pins_runtime)) {
			err = PTR_ERR(bdev->pins_runtime);
			return err;
		}

		bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset",
						      GPIOD_OUT_LOW);
		if (IS_ERR(bdev->reset)) {
			err = PTR_ERR(bdev->reset);
			return err;
		}
	} else if (btmtkuart_is_builtin_soc(bdev)) {
		bdev->clk = devm_clk_get(&serdev->dev, "ref");
		if (IS_ERR(bdev->clk))
			return PTR_ERR(bdev->clk);
	}

	return 0;
}

static int btmtkuart_probe(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev;
	struct hci_dev *hdev;
	int err;

	bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->data = of_device_get_match_data(&serdev->dev);
	if (!bdev->data)
		return -ENODEV;

	bdev->serdev = serdev;
	serdev_device_set_drvdata(serdev, bdev);

	serdev_device_set_client_ops(serdev, &btmtkuart_client_ops);

	err = btmtkuart_parse_dt(serdev);
	if (err < 0)
		return err;

	INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
	skb_queue_head_init(&bdev->txq);

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		dev_err(&serdev->dev, "Can't allocate HCI device\n");
		return -ENOMEM;
	}

	bdev->hdev = hdev;

	hdev->bus = HCI_UART;
	hci_set_drvdata(hdev, bdev);

	hdev->open = btmtkuart_open;
	hdev->close = btmtkuart_close;
	hdev->flush = btmtkuart_flush;
	hdev->setup = btmtkuart_setup;
	hdev->shutdown = btmtkuart_shutdown;
	hdev->send = btmtkuart_send_frame;
	SET_HCIDEV_DEV(hdev, &serdev->dev);

	hdev->manufacturer = 70;
	set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);

	if (btmtkuart_is_standalone(bdev)) {
		/* Switch to the specific pin state that booting requires */
		pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);

		/* Power on */
		err = regulator_enable(bdev->vcc);
		if (err < 0)
			return err;

		/* Reset if the reset-gpios property is available; otherwise,
		 * the board-level design has to guarantee the reset.
		 */
		if (bdev->reset) {
			gpiod_set_value_cansleep(bdev->reset, 1);
			usleep_range(1000, 2000);
			gpiod_set_value_cansleep(bdev->reset, 0);
		}

		/* Wait some time until the device is ready, then switch to
		 * the pin mode the device requires for UART transfers.
		 */
		msleep(50);
		pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime);

		/* A standalone device doesn't depend on a power domain on
		 * the SoC, so mark it as having no callbacks.
		 */
		pm_runtime_no_callbacks(&serdev->dev);

		set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state);
	}

	err = hci_register_dev(hdev);
	if (err < 0) {
		dev_err(&serdev->dev, "Can't register HCI device\n");
		hci_free_dev(hdev);
		goto err_regulator_disable;
	}

	return 0;

err_regulator_disable:
	if (btmtkuart_is_standalone(bdev)) {
		pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
		regulator_disable(bdev->vcc);
	}

	return err;
}

static void btmtkuart_remove(struct serdev_device *serdev)
{
	struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
	struct hci_dev *hdev = bdev->hdev;

	if (btmtkuart_is_standalone(bdev)) {
		pinctrl_select_state(bdev->pinctrl, bdev->pins_boot);
		regulator_disable(bdev->vcc);
	}

	hci_unregister_dev(hdev);
	hci_free_dev(hdev);
}

static const struct btmtkuart_data mt7622_data = {
	.fwname = FIRMWARE_MT7622,
};

static const struct btmtkuart_data mt7663_data = {
	.flags = BTMTKUART_FLAG_STANDALONE_HW,
	.fwname = FIRMWARE_MT7663,
};

static const struct btmtkuart_data mt7668_data = {
	.flags = BTMTKUART_FLAG_STANDALONE_HW,
	.fwname = FIRMWARE_MT7668,
};

#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match_table[] = {
	{ .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data},
	{ .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data},
	{ }
};
MODULE_DEVICE_TABLE(of, mtk_of_match_table);
#endif

static struct serdev_device_driver btmtkuart_driver = {
	.probe = btmtkuart_probe,
	.remove = btmtkuart_remove,
	.driver = {
		.name = "btmtkuart",
		.of_match_table = of_match_ptr(mtk_of_match_table),
	},
};

module_serdev_device_driver(btmtkuart_driver);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);