Bluetooth: hci_qca: Add wrapper functions for setting UART speed
[linux-2.6-block.git] drivers/bluetooth/hci_qca.c
1/*
2 * Bluetooth Software UART Qualcomm protocol
3 *
4 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
5 * protocol extension to H4.
6 *
7 * Copyright (C) 2007 Texas Instruments, Inc.
8 * Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
9 *
10 * Acknowledgements:
11 * This file is based on hci_ll.c, which was...
12 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
13 * which was in turn based on hci_h4.c, which was written
14 * by Maxim Krasnyansky and Marcel Holtmann.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2
18 * as published by the Free Software Foundation
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 */
30
31#include <linux/kernel.h>
32#include <linux/clk.h>
33#include <linux/debugfs.h>
34#include <linux/gpio/consumer.h>
35#include <linux/mod_devicetable.h>
36#include <linux/module.h>
37#include <linux/serdev.h>
38
39#include <net/bluetooth/bluetooth.h>
40#include <net/bluetooth/hci_core.h>
41
42#include "hci_uart.h"
43#include "btqca.h"
44
45/* HCI_IBS protocol messages */
46#define HCI_IBS_SLEEP_IND 0xFE
47#define HCI_IBS_WAKE_IND 0xFD
48#define HCI_IBS_WAKE_ACK 0xFC
49#define HCI_MAX_IBS_SIZE 10
50
51/* Controller states */
52#define STATE_IN_BAND_SLEEP_ENABLED 1
53
54#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
55#define IBS_TX_IDLE_TIMEOUT_MS 2000
56#define BAUDRATE_SETTLE_TIMEOUT_MS 300
57
58/* susclk rate */
59#define SUSCLK_RATE_32KHZ 32768
60
61/* HCI_IBS transmit side sleep protocol states */
62enum tx_ibs_states {
63 HCI_IBS_TX_ASLEEP,
64 HCI_IBS_TX_WAKING,
65 HCI_IBS_TX_AWAKE,
66};
67
68/* HCI_IBS receive side sleep protocol states */
69enum rx_states {
70 HCI_IBS_RX_ASLEEP,
71 HCI_IBS_RX_AWAKE,
72};
73
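/* Rough sketch of the HCI_IBS handshake on the host TX path, as driven by
 * the state machines below (a summary of this driver's behaviour, not a
 * spec quote):
 *
 *	host                            controller
 *	 |  --- HCI_IBS_WAKE_IND --->    |   tx: ASLEEP -> WAKING
 *	 |  <--- HCI_IBS_WAKE_ACK ---    |   tx: WAKING -> AWAKE, flush tx_wait_q
 *	 |      ... HCI traffic ...      |
 *	 |  --- HCI_IBS_SLEEP_IND --->   |   tx idle timeout: AWAKE -> ASLEEP
 */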
74/* HCI_IBS transmit and receive side clock state vote */
75enum hci_ibs_clock_state_vote {
76 HCI_IBS_VOTE_STATS_UPDATE,
77 HCI_IBS_TX_VOTE_CLOCK_ON,
78 HCI_IBS_TX_VOTE_CLOCK_OFF,
79 HCI_IBS_RX_VOTE_CLOCK_ON,
80 HCI_IBS_RX_VOTE_CLOCK_OFF,
81};
82
83struct qca_data {
84 struct hci_uart *hu;
85 struct sk_buff *rx_skb;
86 struct sk_buff_head txq;
87 struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
88 spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
89 u8 tx_ibs_state; /* HCI_IBS transmit side power state */
90 u8 rx_ibs_state; /* HCI_IBS receive side power state */
91 bool tx_vote; /* Clock must be on for TX */
92 bool rx_vote; /* Clock must be on for RX */
93 struct timer_list tx_idle_timer;
94 u32 tx_idle_delay;
95 struct timer_list wake_retrans_timer;
96 u32 wake_retrans;
97 struct workqueue_struct *workqueue;
98 struct work_struct ws_awake_rx;
99 struct work_struct ws_awake_device;
100 struct work_struct ws_rx_vote_off;
101 struct work_struct ws_tx_vote_off;
102 unsigned long flags;
103
104 /* For debugging purpose */
105 u64 ibs_sent_wacks;
106 u64 ibs_sent_slps;
107 u64 ibs_sent_wakes;
108 u64 ibs_recv_wacks;
109 u64 ibs_recv_slps;
110 u64 ibs_recv_wakes;
111 u64 vote_last_jif;
112 u32 vote_on_ms;
113 u32 vote_off_ms;
114 u64 tx_votes_on;
115 u64 rx_votes_on;
116 u64 tx_votes_off;
117 u64 rx_votes_off;
118 u64 votes_on;
119 u64 votes_off;
120};
121
122enum qca_speed_type {
123 QCA_INIT_SPEED = 1,
124 QCA_OPER_SPEED
125};
126
127struct qca_serdev {
128 struct hci_uart serdev_hu;
129 struct gpio_desc *bt_en;
130 struct clk *susclk;
131};
132
133static void __serial_clock_on(struct tty_struct *tty)
134{
135 /* TODO: Some chipsets require the UART clock to be enabled on the
136 * client (host) side to save power, or need other manual intervention.
137 * Add code to turn the UART clock on here if needed.
138 */
139}
140
141static void __serial_clock_off(struct tty_struct *tty)
142{
143 /* TODO: Some chipsets require the UART clock to be disabled on the
144 * client (host) side to save power, or need other manual intervention.
145 * Add code to turn the UART clock off here if needed.
146 */
147}
148
149/* serial_clock_vote needs to be called with the ibs lock held */
150static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
151{
152 struct qca_data *qca = hu->priv;
153 unsigned int diff;
154
155 bool old_vote = (qca->tx_vote | qca->rx_vote);
156 bool new_vote;
157
158 switch (vote) {
159 case HCI_IBS_VOTE_STATS_UPDATE:
160 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
161
162 if (old_vote)
163 qca->vote_off_ms += diff;
164 else
165 qca->vote_on_ms += diff;
166 return;
167
168 case HCI_IBS_TX_VOTE_CLOCK_ON:
169 qca->tx_vote = true;
170 qca->tx_votes_on++;
171 new_vote = true;
172 break;
173
174 case HCI_IBS_RX_VOTE_CLOCK_ON:
175 qca->rx_vote = true;
176 qca->rx_votes_on++;
177 new_vote = true;
178 break;
179
180 case HCI_IBS_TX_VOTE_CLOCK_OFF:
181 qca->tx_vote = false;
182 qca->tx_votes_off++;
183 new_vote = qca->rx_vote | qca->tx_vote;
184 break;
185
186 case HCI_IBS_RX_VOTE_CLOCK_OFF:
187 qca->rx_vote = false;
188 qca->rx_votes_off++;
189 new_vote = qca->rx_vote | qca->tx_vote;
190 break;
191
192 default:
193 BT_ERR("Voting irregularity");
194 return;
195 }
196
197 if (new_vote != old_vote) {
198 if (new_vote)
199 __serial_clock_on(hu->tty);
200 else
201 __serial_clock_off(hu->tty);
202
203 BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
204 vote ? "true" : "false");
205
206 diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
207
208 if (new_vote) {
209 qca->votes_on++;
210 qca->vote_off_ms += diff;
211 } else {
212 qca->votes_off++;
213 qca->vote_on_ms += diff;
214 }
215 qca->vote_last_jif = jiffies;
216 }
217}
218
219/* Builds and sends an HCI_IBS command packet.
220 * These are very simple packets with only 1 cmd byte.
221 */
222static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
223{
224 int err = 0;
225 struct sk_buff *skb = NULL;
226 struct qca_data *qca = hu->priv;
227
228 BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
229
230 skb = bt_skb_alloc(1, GFP_ATOMIC);
231 if (!skb) {
232 BT_ERR("Failed to allocate memory for HCI_IBS packet");
233 return -ENOMEM;
234 }
235
236 /* Assign HCI_IBS type */
237 skb_put_u8(skb, cmd);
238
239 skb_queue_tail(&qca->txq, skb);
240
241 return err;
242}
243
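/* Workqueue handler: wake the controller before we transmit. Votes the TX
 * clock on, sends HCI_IBS_WAKE_IND, bumps ibs_sent_wakes and arms the wake
 * retransmit timer before kicking the transmit path.
 */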
244static void qca_wq_awake_device(struct work_struct *work)
245{
246 struct qca_data *qca = container_of(work, struct qca_data,
247 ws_awake_device);
248 struct hci_uart *hu = qca->hu;
249 unsigned long retrans_delay;
250
251 BT_DBG("hu %p wq awake device", hu);
252
253 /* Vote for serial clock */
254 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
255
256 spin_lock(&qca->hci_ibs_lock);
257
258 /* Send wake indication to device */
259 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
260 BT_ERR("Failed to send WAKE to device");
261
262 qca->ibs_sent_wakes++;
263
264 /* Start retransmit timer */
265 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
266 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
267
268 spin_unlock(&qca->hci_ibs_lock);
269
270 /* Actually send the packets */
271 hci_uart_tx_wakeup(hu);
272}
273
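/* Workqueue handler: the controller wants to talk to us. Vote the RX clock
 * on, mark the RX side awake and acknowledge with HCI_IBS_WAKE_ACK.
 */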
274static void qca_wq_awake_rx(struct work_struct *work)
275{
276 struct qca_data *qca = container_of(work, struct qca_data,
277 ws_awake_rx);
278 struct hci_uart *hu = qca->hu;
279
280 BT_DBG("hu %p wq awake rx", hu);
281
282 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
283
284 spin_lock(&qca->hci_ibs_lock);
285 qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
286
287 /* Always acknowledge device wake up,
288 * sending IBS message doesn't count as TX ON.
289 */
290 if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
291 BT_ERR("Failed to acknowledge device wake up");
292
293 qca->ibs_sent_wacks++;
294
295 spin_unlock(&qca->hci_ibs_lock);
296
297 /* Actually send the packets */
298 hci_uart_tx_wakeup(hu);
299}
300
301static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
302{
303 struct qca_data *qca = container_of(work, struct qca_data,
304 ws_rx_vote_off);
305 struct hci_uart *hu = qca->hu;
306
307 BT_DBG("hu %p rx clock vote off", hu);
308
309 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
310}
311
312static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
313{
314 struct qca_data *qca = container_of(work, struct qca_data,
315 ws_tx_vote_off);
316 struct hci_uart *hu = qca->hu;
317
318 BT_DBG("hu %p tx clock vote off", hu);
319
320 /* Run HCI tx handling unlocked */
321 hci_uart_tx_wakeup(hu);
322
323 /* Now that the message is queued to the tty driver, vote for the tty
324 * clocks to go off; the tty driver defers that until tx is done.
325 */
326 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
327}
328
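/* TX idle timer: nothing was transmitted for tx_idle_delay ms, so tell the
 * controller we are going to sleep (HCI_IBS_SLEEP_IND) and vote the TX
 * clock off from the workqueue.
 */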
329static void hci_ibs_tx_idle_timeout(struct timer_list *t)
330{
331 struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
332 struct hci_uart *hu = qca->hu;
333 unsigned long flags;
334
335 BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
336
337 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
338 flags, SINGLE_DEPTH_NESTING);
339
340 switch (qca->tx_ibs_state) {
341 case HCI_IBS_TX_AWAKE:
342 /* TX_IDLE, go to SLEEP */
343 if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
344 BT_ERR("Failed to send SLEEP to device");
345 break;
346 }
347 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
348 qca->ibs_sent_slps++;
349 queue_work(qca->workqueue, &qca->ws_tx_vote_off);
350 break;
351
352 case HCI_IBS_TX_ASLEEP:
353 case HCI_IBS_TX_WAKING:
354 /* Fall through */
355
356 default:
357 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
358 break;
359 }
360
361 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
362}
363
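/* Wake retransmit timer: no HCI_IBS_WAKE_ACK arrived within wake_retrans ms,
 * so resend HCI_IBS_WAKE_IND and rearm the timer.
 */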
364static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
365{
366 struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
367 struct hci_uart *hu = qca->hu;
368 unsigned long flags, retrans_delay;
369 bool retransmit = false;
370
371 BT_DBG("hu %p wake retransmit timeout in %d state",
372 hu, qca->tx_ibs_state);
373
374 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
375 flags, SINGLE_DEPTH_NESTING);
376
377 switch (qca->tx_ibs_state) {
378 case HCI_IBS_TX_WAKING:
379 /* No WAKE_ACK, retransmit WAKE */
380 retransmit = true;
381 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
382 BT_ERR("Failed to acknowledge device wake up");
383 break;
384 }
385 qca->ibs_sent_wakes++;
386 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
387 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
388 break;
389
390 case HCI_IBS_TX_ASLEEP:
391 case HCI_IBS_TX_AWAKE:
392 /* Fall through */
393
394 default:
395 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
396 break;
397 }
398
399 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
400
401 if (retransmit)
402 hci_uart_tx_wakeup(hu);
403}
404
405/* Initialize protocol */
406static int qca_open(struct hci_uart *hu)
407{
408 struct qca_serdev *qcadev;
409 struct qca_data *qca;
410
411 BT_DBG("hu %p qca_open", hu);
412
413 qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
414 if (!qca)
415 return -ENOMEM;
416
417 skb_queue_head_init(&qca->txq);
418 skb_queue_head_init(&qca->tx_wait_q);
419 spin_lock_init(&qca->hci_ibs_lock);
420 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
421 if (!qca->workqueue) {
422 BT_ERR("QCA Workqueue not initialized properly");
423 kfree(qca);
424 return -ENOMEM;
425 }
426
427 INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
428 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
429 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
430 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
431
432 qca->hu = hu;
433
434 /* Assume we start with both sides asleep -- extra wakes OK */
435 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
436 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
437
438 /* clocks actually on, but we start votes off */
439 qca->tx_vote = false;
440 qca->rx_vote = false;
441 qca->flags = 0;
442
443 qca->ibs_sent_wacks = 0;
444 qca->ibs_sent_slps = 0;
445 qca->ibs_sent_wakes = 0;
446 qca->ibs_recv_wacks = 0;
447 qca->ibs_recv_slps = 0;
448 qca->ibs_recv_wakes = 0;
449 qca->vote_last_jif = jiffies;
450 qca->vote_on_ms = 0;
451 qca->vote_off_ms = 0;
452 qca->votes_on = 0;
453 qca->votes_off = 0;
454 qca->tx_votes_on = 0;
455 qca->tx_votes_off = 0;
456 qca->rx_votes_on = 0;
457 qca->rx_votes_off = 0;
458
459 hu->priv = qca;
460
461 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
462 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
463
464 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
465 qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
466
467 if (hu->serdev) {
468 serdev_device_open(hu->serdev);
469
470 qcadev = serdev_device_get_drvdata(hu->serdev);
471 gpiod_set_value_cansleep(qcadev->bt_en, 1);
472 }
473
474 BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
475 qca->tx_idle_delay, qca->wake_retrans);
476
477 return 0;
478}
479
480static void qca_debugfs_init(struct hci_dev *hdev)
481{
482 struct hci_uart *hu = hci_get_drvdata(hdev);
483 struct qca_data *qca = hu->priv;
484 struct dentry *ibs_dir;
485 umode_t mode;
486
487 if (!hdev->debugfs)
488 return;
489
490 ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
491
492 /* read only */
493 mode = S_IRUGO;
494 debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
495 debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
496 debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
497 &qca->ibs_sent_slps);
498 debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
499 &qca->ibs_sent_wakes);
500 debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
501 &qca->ibs_sent_wacks);
502 debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
503 &qca->ibs_recv_slps);
504 debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
505 &qca->ibs_recv_wakes);
506 debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
507 &qca->ibs_recv_wacks);
508 debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
509 debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
510 debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
511 debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
512 debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
513 debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
514 debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
515 debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
516 debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
517 debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
518
519 /* read/write */
520 mode = S_IRUGO | S_IWUSR;
521 debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
522 debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
523 &qca->tx_idle_delay);
524}
525
526/* Flush protocol data */
527static int qca_flush(struct hci_uart *hu)
528{
529 struct qca_data *qca = hu->priv;
530
531 BT_DBG("hu %p qca flush", hu);
532
533 skb_queue_purge(&qca->tx_wait_q);
534 skb_queue_purge(&qca->txq);
535
536 return 0;
537}
538
539/* Close protocol */
540static int qca_close(struct hci_uart *hu)
541{
542 struct qca_serdev *qcadev;
543 struct qca_data *qca = hu->priv;
544
545 BT_DBG("hu %p qca close", hu);
546
547 serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
548
549 skb_queue_purge(&qca->tx_wait_q);
550 skb_queue_purge(&qca->txq);
551 del_timer(&qca->tx_idle_timer);
552 del_timer(&qca->wake_retrans_timer);
553 destroy_workqueue(qca->workqueue);
554 qca->hu = NULL;
555
556 if (hu->serdev) {
557 serdev_device_close(hu->serdev);
558
559 qcadev = serdev_device_get_drvdata(hu->serdev);
560 gpiod_set_value_cansleep(qcadev->bt_en, 0);
561 }
562
563 kfree_skb(qca->rx_skb);
564
565 hu->priv = NULL;
566
567 kfree(qca);
568
569 return 0;
570}
571
572/* Called upon a wake-up-indication from the device.
573 */
574static void device_want_to_wakeup(struct hci_uart *hu)
575{
576 unsigned long flags;
577 struct qca_data *qca = hu->priv;
578
579 BT_DBG("hu %p want to wake up", hu);
580
581 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
582
583 qca->ibs_recv_wakes++;
584
585 switch (qca->rx_ibs_state) {
586 case HCI_IBS_RX_ASLEEP:
587 /* Make sure the clock is on - we may have turned it off since
588 * receiving the wake-up indicator, so wake the rx clock here.
589 */
590 queue_work(qca->workqueue, &qca->ws_awake_rx);
591 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
592 return;
593
594 case HCI_IBS_RX_AWAKE:
595 /* Always acknowledge device wake up,
596 * sending IBS message doesn't count as TX ON.
597 */
598 if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
599 BT_ERR("Failed to acknowledge device wake up");
600 break;
601 }
602 qca->ibs_sent_wacks++;
603 break;
604
605 default:
606 /* Any other state is illegal */
607 BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
608 qca->rx_ibs_state);
609 break;
610 }
611
612 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
613
614 /* Actually send the packets */
615 hci_uart_tx_wakeup(hu);
616}
617
618/* Called upon a sleep-indication from the device.
619 */
620static void device_want_to_sleep(struct hci_uart *hu)
621{
622 unsigned long flags;
623 struct qca_data *qca = hu->priv;
624
625 BT_DBG("hu %p want to sleep", hu);
626
627 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
628
629 qca->ibs_recv_slps++;
630
631 switch (qca->rx_ibs_state) {
632 case HCI_IBS_RX_AWAKE:
633 /* Update state */
634 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
635 /* Vote off rx clock under workqueue */
636 queue_work(qca->workqueue, &qca->ws_rx_vote_off);
637 break;
638
639 case HCI_IBS_RX_ASLEEP:
640 /* Fall through */
641
642 default:
643 /* Any other state is illegal */
644 BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
645 qca->rx_ibs_state);
646 break;
647 }
648
649 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
650}
651
652/* Called upon wake-up-acknowledgement from the device
653 */
654static void device_woke_up(struct hci_uart *hu)
655{
656 unsigned long flags, idle_delay;
657 struct qca_data *qca = hu->priv;
658 struct sk_buff *skb = NULL;
659
660 BT_DBG("hu %p woke up", hu);
661
662 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
663
664 qca->ibs_recv_wacks++;
665
666 switch (qca->tx_ibs_state) {
667 case HCI_IBS_TX_AWAKE:
668 /* Expect one if we send 2 WAKEs */
669 BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
670 qca->tx_ibs_state);
671 break;
672
673 case HCI_IBS_TX_WAKING:
674 /* Send pending packets */
675 while ((skb = skb_dequeue(&qca->tx_wait_q)))
676 skb_queue_tail(&qca->txq, skb);
677
678 /* Switch timers and change state to HCI_IBS_TX_AWAKE */
679 del_timer(&qca->wake_retrans_timer);
680 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
681 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
682 qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
683 break;
684
685 case HCI_IBS_TX_ASLEEP:
686 /* Fall through */
687
688 default:
689 BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
690 qca->tx_ibs_state);
691 break;
692 }
693
694 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
695
696 /* Actually send the packets */
697 hci_uart_tx_wakeup(hu);
698}
699
700/* Enqueue frame for transmission (padding, crc, etc); may be called from
701 * two simultaneous tasklets.
702 */
703static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
704{
705 unsigned long flags = 0, idle_delay;
706 struct qca_data *qca = hu->priv;
707
708 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
709 qca->tx_ibs_state);
710
711 /* Prepend skb with frame type */
712 memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
713
714 /* Don't go to sleep in the middle of a patch download, or when
715 * Out-Of-Band (GPIO-controlled) sleep is selected.
716 */
717 if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
718 skb_queue_tail(&qca->txq, skb);
719 return 0;
720 }
721
722 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
723
724 /* Act according to current state */
725 switch (qca->tx_ibs_state) {
726 case HCI_IBS_TX_AWAKE:
727 BT_DBG("Device awake, sending normally");
728 skb_queue_tail(&qca->txq, skb);
729 idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
730 mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
731 break;
732
733 case HCI_IBS_TX_ASLEEP:
734 BT_DBG("Device asleep, waking up and queueing packet");
735 /* Save packet for later */
736 skb_queue_tail(&qca->tx_wait_q, skb);
737
738 qca->tx_ibs_state = HCI_IBS_TX_WAKING;
739 /* Schedule a work queue to wake up device */
740 queue_work(qca->workqueue, &qca->ws_awake_device);
741 break;
742
743 case HCI_IBS_TX_WAKING:
744 BT_DBG("Device waking up, queueing packet");
745 /* Transient state; just keep packet for later */
746 skb_queue_tail(&qca->tx_wait_q, skb);
747 break;
748
749 default:
750 BT_ERR("Illegal tx state: %d (losing packet)",
751 qca->tx_ibs_state);
752 kfree_skb(skb);
753 break;
754 }
755
756 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
757
758 return 0;
759}
760
761static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
762{
763 struct hci_uart *hu = hci_get_drvdata(hdev);
764
765 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
766
767 device_want_to_sleep(hu);
768
769 kfree_skb(skb);
770 return 0;
771}
772
773static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
774{
775 struct hci_uart *hu = hci_get_drvdata(hdev);
776
777 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
778
779 device_want_to_wakeup(hu);
780
781 kfree_skb(skb);
782 return 0;
783}
784
785static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
786{
787 struct hci_uart *hu = hci_get_drvdata(hdev);
788
789 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
790
791 device_woke_up(hu);
792
793 kfree_skb(skb);
794 return 0;
795}
796
797#define QCA_IBS_SLEEP_IND_EVENT \
798 .type = HCI_IBS_SLEEP_IND, \
799 .hlen = 0, \
800 .loff = 0, \
801 .lsize = 0, \
802 .maxlen = HCI_MAX_IBS_SIZE
803
804#define QCA_IBS_WAKE_IND_EVENT \
805 .type = HCI_IBS_WAKE_IND, \
806 .hlen = 0, \
807 .loff = 0, \
808 .lsize = 0, \
809 .maxlen = HCI_MAX_IBS_SIZE
810
811#define QCA_IBS_WAKE_ACK_EVENT \
812 .type = HCI_IBS_WAKE_ACK, \
813 .hlen = 0, \
814 .loff = 0, \
815 .lsize = 0, \
816 .maxlen = HCI_MAX_IBS_SIZE
817
818static const struct h4_recv_pkt qca_recv_pkts[] = {
819 { H4_RECV_ACL, .recv = hci_recv_frame },
820 { H4_RECV_SCO, .recv = hci_recv_frame },
821 { H4_RECV_EVENT, .recv = hci_recv_frame },
822 { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
823 { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
824 { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
825};
826
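/* Feed received bytes into the shared H4 reassembly helper; the HCI_IBS
 * control bytes are dispatched through the qca_recv_pkts table above.
 */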
827static int qca_recv(struct hci_uart *hu, const void *data, int count)
828{
829 struct qca_data *qca = hu->priv;
830
831 if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
832 return -EUNATCH;
833
834 qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
835 qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
836 if (IS_ERR(qca->rx_skb)) {
837 int err = PTR_ERR(qca->rx_skb);
838 bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
839 qca->rx_skb = NULL;
840 return err;
841 }
842
843 return count;
844}
845
846static struct sk_buff *qca_dequeue(struct hci_uart *hu)
847{
848 struct qca_data *qca = hu->priv;
849
850 return skb_dequeue(&qca->txq);
851}
852
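/* Map a UART speed in bps to the controller's QCA_BAUDRATE_* code; unknown
 * speeds fall back to QCA_BAUDRATE_115200.
 */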
853static uint8_t qca_get_baudrate_value(int speed)
854{
855 switch (speed) {
856 case 9600:
857 return QCA_BAUDRATE_9600;
858 case 19200:
859 return QCA_BAUDRATE_19200;
860 case 38400:
861 return QCA_BAUDRATE_38400;
862 case 57600:
863 return QCA_BAUDRATE_57600;
864 case 115200:
865 return QCA_BAUDRATE_115200;
866 case 230400:
867 return QCA_BAUDRATE_230400;
868 case 460800:
869 return QCA_BAUDRATE_460800;
870 case 500000:
871 return QCA_BAUDRATE_500000;
872 case 921600:
873 return QCA_BAUDRATE_921600;
874 case 1000000:
875 return QCA_BAUDRATE_1000000;
876 case 2000000:
877 return QCA_BAUDRATE_2000000;
878 case 3000000:
879 return QCA_BAUDRATE_3000000;
880 case 3500000:
881 return QCA_BAUDRATE_3500000;
882 default:
883 return QCA_BAUDRATE_115200;
884 }
885}
886
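/* Queue the vendor HCI command that asks the controller to move its UART to
 * the given QCA baudrate code, then give it BAUDRATE_SETTLE_TIMEOUT_MS to
 * settle before the host follows.
 */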
887static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
888{
889 struct hci_uart *hu = hci_get_drvdata(hdev);
890 struct qca_data *qca = hu->priv;
891 struct sk_buff *skb;
892 u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
893
894 if (baudrate > QCA_BAUDRATE_3000000)
895 return -EINVAL;
896
897 cmd[4] = baudrate;
898
899 skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
900 if (!skb) {
901 bt_dev_err(hdev, "Failed to allocate baudrate packet");
902 return -ENOMEM;
903 }
904
905 /* Assign commands to change baudrate and packet type. */
906 skb_put_data(skb, cmd, sizeof(cmd));
907 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
908
909 skb_queue_tail(&qca->txq, skb);
910 hci_uart_tx_wakeup(hu);
911
912 /* Wait 300ms for the controller to switch to the new baudrate. The
913 * controller comes back up after it has processed this HCI command,
914 * and the host can then talk to it at the new baudrate.
915 */
916 set_current_state(TASK_UNINTERRUPTIBLE);
917 schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
918 set_current_state(TASK_RUNNING);
919
920 return 0;
921}
922
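/* Set the host-side UART speed: through serdev when the device is attached
 * via serdev, otherwise via the tty-based hci_uart helper.
 */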
923static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
924{
925 if (hu->serdev)
926 serdev_device_set_baudrate(hu->serdev, speed);
927 else
928 hci_uart_set_baudrate(hu, speed);
929}
930
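/* Return the UART speed to use for the given phase (firmware download vs.
 * normal operation). A speed set on the hci_uart instance takes precedence
 * over the protocol default; 0 means no speed is configured.
 */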
931static unsigned int qca_get_speed(struct hci_uart *hu,
932 enum qca_speed_type speed_type)
933{
934 unsigned int speed = 0;
935
936 if (speed_type == QCA_INIT_SPEED) {
937 if (hu->init_speed)
938 speed = hu->init_speed;
939 else if (hu->proto->init_speed)
940 speed = hu->proto->init_speed;
941 } else {
942 if (hu->oper_speed)
943 speed = hu->oper_speed;
944 else if (hu->proto->oper_speed)
945 speed = hu->proto->oper_speed;
946 }
947
948 return speed;
949}
950
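/* Setup can only proceed when both an init and an operating speed are known. */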
951static int qca_check_speeds(struct hci_uart *hu)
952{
953 if (!qca_get_speed(hu, QCA_INIT_SPEED) ||
954 !qca_get_speed(hu, QCA_OPER_SPEED))
955 return -EINVAL;
956
957 return 0;
958}
959
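/* Switch to the requested speed. For the operating speed the controller is
 * told about the change first (qca_set_baudrate), then the host UART is
 * reprogrammed to match; for the init speed only the host side is set.
 */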
960static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
961{
962 unsigned int speed, qca_baudrate;
963 int ret;
964
965 if (speed_type == QCA_INIT_SPEED) {
966 speed = qca_get_speed(hu, QCA_INIT_SPEED);
967 if (speed)
968 host_set_baudrate(hu, speed);
969 } else {
970 speed = qca_get_speed(hu, QCA_OPER_SPEED);
971 if (!speed)
972 return 0;
973
974 qca_baudrate = qca_get_baudrate_value(speed);
975 bt_dev_info(hu->hdev, "Set UART speed to %d", speed);
976 ret = qca_set_baudrate(hu->hdev, qca_baudrate);
977 if (ret)
978 return ret;
979
980 host_set_baudrate(hu, speed);
981 }
982
983 return 0;
984}
985
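/* Protocol setup: run the ROME firmware/NVM download with in-band sleep
 * disabled, start at the init speed, switch to the operating speed, read
 * the SoC version, and only re-enable in-band sleep (and the debugfs nodes)
 * once qca_uart_setup() has succeeded.
 */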
986static int qca_setup(struct hci_uart *hu)
987{
988 struct hci_dev *hdev = hu->hdev;
989 struct qca_data *qca = hu->priv;
990 unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
991 int ret;
992 int soc_ver = 0;
993
994 bt_dev_info(hdev, "ROME setup");
995
996 ret = qca_check_speeds(hu);
997 if (ret)
998 return ret;
999
1000 /* Patch downloading has to be done without IBS mode */
1001 clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
1002
1003 /* Setup initial baudrate */
1004 qca_set_speed(hu, QCA_INIT_SPEED);
1005
1006 /* Setup user speed if needed */
1007 speed = qca_get_speed(hu, QCA_OPER_SPEED);
1008 if (speed) {
1009 ret = qca_set_speed(hu, QCA_OPER_SPEED);
1010 if (ret)
1011 return ret;
1012
1013 qca_baudrate = qca_get_baudrate_value(speed);
1014 }
1015
1016 /* Get QCA version information */
1017 ret = qca_read_soc_version(hdev, &soc_ver);
1018 if (ret)
1019 return ret;
1020
1021 bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
1022
1023 /* Setup patch / NVM configurations */
1024 ret = qca_uart_setup(hdev, qca_baudrate, QCA_ROME, soc_ver);
1025 if (!ret) {
1026 set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
1027 qca_debugfs_init(hdev);
1028 } else if (ret == -ENOENT) {
1029 /* No patch/nvm-config found, run with original fw/config */
1030 ret = 0;
1031 } else if (ret == -EAGAIN) {
1032 /*
1033 * Userspace firmware loader will return -EAGAIN in case no
1034 * patch/nvm-config is found, so run with original fw/config.
1035 */
1036 ret = 0;
1037 }
1038
1039 /* Setup bdaddr */
1040 hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
1041
1042 return ret;
1043}
1044
1045static struct hci_uart_proto qca_proto = {
1046 .id = HCI_UART_QCA,
1047 .name = "QCA",
1048 .manufacturer = 29,
1049 .init_speed = 115200,
1050 .oper_speed = 3000000,
1051 .open = qca_open,
1052 .close = qca_close,
1053 .flush = qca_flush,
1054 .setup = qca_setup,
1055 .recv = qca_recv,
1056 .enqueue = qca_enqueue,
1057 .dequeue = qca_dequeue,
1058};
1059
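/* serdev probe: claim the BT enable GPIO and the 32.768 kHz susclk described
 * in the devicetree node, then register the hci_uart instance on top of the
 * serdev device.
 */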
1060static int qca_serdev_probe(struct serdev_device *serdev)
1061{
1062 struct qca_serdev *qcadev;
1063 int err;
1064
1065 qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
1066 if (!qcadev)
1067 return -ENOMEM;
1068
1069 qcadev->serdev_hu.serdev = serdev;
1070 serdev_device_set_drvdata(serdev, qcadev);
1071
1072 qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
1073 GPIOD_OUT_LOW);
1074 if (IS_ERR(qcadev->bt_en)) {
1075 dev_err(&serdev->dev, "failed to acquire enable gpio\n");
1076 return PTR_ERR(qcadev->bt_en);
1077 }
1078
1079 qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
1080 if (IS_ERR(qcadev->susclk)) {
1081 dev_err(&serdev->dev, "failed to acquire clk\n");
1082 return PTR_ERR(qcadev->susclk);
1083 }
1084
1085 err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
1086 if (err)
1087 return err;
1088
1089 err = clk_prepare_enable(qcadev->susclk);
1090 if (err)
1091 return err;
1092
1093 err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
1094 if (err)
1095 clk_disable_unprepare(qcadev->susclk);
1096
1097 return err;
1098}
1099
1100static void qca_serdev_remove(struct serdev_device *serdev)
1101{
1102 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
1103
1104 hci_uart_unregister_device(&qcadev->serdev_hu);
1105
1106 clk_disable_unprepare(qcadev->susclk);
1107}
1108
1109static const struct of_device_id qca_bluetooth_of_match[] = {
1110 { .compatible = "qcom,qca6174-bt" },
1111 { /* sentinel */ }
1112};
1113MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
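/* A minimal sketch of a matching devicetree node, based only on what this
 * driver requests (the "enable" GPIO and an unnamed clock); the parent UART
 * label and the phandles are illustrative, not taken from a real board:
 *
 *	&uart3 {
 *		bluetooth {
 *			compatible = "qcom,qca6174-bt";
 *			enable-gpios = <&pm8994_gpios 19 GPIO_ACTIVE_HIGH>;
 *			clocks = <&divclk4>;
 *		};
 *	};
 */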
1114
1115static struct serdev_device_driver qca_serdev_driver = {
1116 .probe = qca_serdev_probe,
1117 .remove = qca_serdev_remove,
1118 .driver = {
1119 .name = "hci_uart_qca",
1120 .of_match_table = qca_bluetooth_of_match,
1121 },
1122};
1123
1124int __init qca_init(void)
1125{
1126 serdev_device_driver_register(&qca_serdev_driver);
1127
1128 return hci_uart_register_proto(&qca_proto);
1129}
1130
1131int __exit qca_deinit(void)
1132{
1133 serdev_device_driver_unregister(&qca_serdev_driver);
1134
1135 return hci_uart_unregister_proto(&qca_proto);
1136}