Commit | Line | Data |
---|---|---|
55046237 MR |
1 | /* |
2 | * MSM 7k/8k High speed uart driver | |
3 | * | |
4 | * Copyright (c) 2007-2011, Code Aurora Forum. All rights reserved. | |
5 | * Copyright (c) 2008 Google Inc. | |
6 | * Modified: Nick Pelly <npelly@google.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU General Public License | |
10 | * version 2 as published by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | |
15 | * See the GNU General Public License for more details. | |
16 | * | |
17 | * Has optional support for uart power management independent of linux | |
18 | * suspend/resume: | |
19 | * | |
20 | * RX wakeup. | |
21 | * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the | |
22 | * UART RX pin). This should only be used if there is not a wakeup | |
23 | * GPIO on the UART CTS, and the first RX byte is known (for example, with the | |
24 | * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will | |
25 | * always be lost. RTS will be asserted even while the UART is off in this mode | |
26 | * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq. | |
27 | */ | |
28 | ||
29 | #include <linux/module.h> | |
30 | ||
31 | #include <linux/serial.h> | |
32 | #include <linux/serial_core.h> | |
33 | #include <linux/slab.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/interrupt.h> | |
36 | #include <linux/irq.h> | |
37 | #include <linux/io.h> | |
38 | #include <linux/ioport.h> | |
39 | #include <linux/kernel.h> | |
40 | #include <linux/timer.h> | |
41 | #include <linux/clk.h> | |
42 | #include <linux/platform_device.h> | |
43 | #include <linux/pm_runtime.h> | |
44 | #include <linux/dma-mapping.h> | |
45 | #include <linux/dmapool.h> | |
46 | #include <linux/wait.h> | |
47 | #include <linux/workqueue.h> | |
48 | ||
49 | #include <linux/atomic.h> | |
50 | #include <asm/irq.h> | |
51 | #include <asm/system.h> | |
52 | ||
53 | #include <mach/hardware.h> | |
54 | #include <mach/dma.h> | |
55 | #include <linux/platform_data/msm_serial_hs.h> | |
56 | ||
57 | /* HSUART Registers */ | |
58 | #define UARTDM_MR1_ADDR 0x0 | |
59 | #define UARTDM_MR2_ADDR 0x4 | |
60 | ||
61 | /* Data Mover result codes */ | |
62 | #define RSLT_FIFO_CNTR_BMSK (0xE << 28) | |
63 | #define RSLT_VLD BIT(1) | |
64 | ||
65 | /* write only register */ | |
66 | #define UARTDM_CSR_ADDR 0x8 | |
67 | #define UARTDM_CSR_115200 0xFF | |
68 | #define UARTDM_CSR_57600 0xEE | |
69 | #define UARTDM_CSR_38400 0xDD | |
70 | #define UARTDM_CSR_28800 0xCC | |
71 | #define UARTDM_CSR_19200 0xBB | |
72 | #define UARTDM_CSR_14400 0xAA | |
73 | #define UARTDM_CSR_9600 0x99 | |
74 | #define UARTDM_CSR_7200 0x88 | |
75 | #define UARTDM_CSR_4800 0x77 | |
76 | #define UARTDM_CSR_3600 0x66 | |
77 | #define UARTDM_CSR_2400 0x55 | |
78 | #define UARTDM_CSR_1200 0x44 | |
79 | #define UARTDM_CSR_600 0x33 | |
80 | #define UARTDM_CSR_300 0x22 | |
81 | #define UARTDM_CSR_150 0x11 | |
82 | #define UARTDM_CSR_75 0x00 | |
83 | ||
84 | /* write only register */ | |
85 | #define UARTDM_TF_ADDR 0x70 | |
86 | #define UARTDM_TF2_ADDR 0x74 | |
87 | #define UARTDM_TF3_ADDR 0x78 | |
88 | #define UARTDM_TF4_ADDR 0x7C | |
89 | ||
90 | /* write only register */ | |
91 | #define UARTDM_CR_ADDR 0x10 | |
92 | #define UARTDM_IMR_ADDR 0x14 | |
93 | ||
94 | #define UARTDM_IPR_ADDR 0x18 | |
95 | #define UARTDM_TFWR_ADDR 0x1c | |
96 | #define UARTDM_RFWR_ADDR 0x20 | |
97 | #define UARTDM_HCR_ADDR 0x24 | |
98 | #define UARTDM_DMRX_ADDR 0x34 | |
99 | #define UARTDM_IRDA_ADDR 0x38 | |
100 | #define UARTDM_DMEN_ADDR 0x3c | |
101 | ||
102 | /* UART_DM_NO_CHARS_FOR_TX */ | |
103 | #define UARTDM_NCF_TX_ADDR 0x40 | |
104 | ||
105 | #define UARTDM_BADR_ADDR 0x44 | |
106 | ||
107 | #define UARTDM_SIM_CFG_ADDR 0x80 | |
108 | /* Read Only register */ | |
109 | #define UARTDM_SR_ADDR 0x8 | |
110 | ||
111 | /* Read Only register */ | |
112 | #define UARTDM_RF_ADDR 0x70 | |
113 | #define UARTDM_RF2_ADDR 0x74 | |
114 | #define UARTDM_RF3_ADDR 0x78 | |
115 | #define UARTDM_RF4_ADDR 0x7C | |
116 | ||
117 | /* Read Only register */ | |
118 | #define UARTDM_MISR_ADDR 0x10 | |
119 | ||
120 | /* Read Only register */ | |
121 | #define UARTDM_ISR_ADDR 0x14 | |
122 | #define UARTDM_RX_TOTAL_SNAP_ADDR 0x38 | |
123 | ||
124 | #define UARTDM_RXFS_ADDR 0x50 | |
125 | ||
126 | /* Register field Mask Mapping */ | |
127 | #define UARTDM_SR_PAR_FRAME_BMSK BIT(5) | |
128 | #define UARTDM_SR_OVERRUN_BMSK BIT(4) | |
129 | #define UARTDM_SR_TXEMT_BMSK BIT(3) | |
130 | #define UARTDM_SR_TXRDY_BMSK BIT(2) | |
131 | #define UARTDM_SR_RXRDY_BMSK BIT(0) | |
132 | ||
133 | #define UARTDM_CR_TX_DISABLE_BMSK BIT(3) | |
134 | #define UARTDM_CR_RX_DISABLE_BMSK BIT(1) | |
135 | #define UARTDM_CR_TX_EN_BMSK BIT(2) | |
136 | #define UARTDM_CR_RX_EN_BMSK BIT(0) | |
137 | ||
138 | /* UARTDM_CR channel_comman bit value (register field is bits 8:4) */ | |
139 | #define RESET_RX 0x10 | |
140 | #define RESET_TX 0x20 | |
141 | #define RESET_ERROR_STATUS 0x30 | |
142 | #define RESET_BREAK_INT 0x40 | |
143 | #define START_BREAK 0x50 | |
144 | #define STOP_BREAK 0x60 | |
145 | #define RESET_CTS 0x70 | |
146 | #define RESET_STALE_INT 0x80 | |
147 | #define RFR_LOW 0xD0 | |
148 | #define RFR_HIGH 0xE0 | |
149 | #define CR_PROTECTION_EN 0x100 | |
150 | #define STALE_EVENT_ENABLE 0x500 | |
151 | #define STALE_EVENT_DISABLE 0x600 | |
152 | #define FORCE_STALE_EVENT 0x400 | |
153 | #define CLEAR_TX_READY 0x300 | |
154 | #define RESET_TX_ERROR 0x800 | |
155 | #define RESET_TX_DONE 0x810 | |
156 | ||
157 | #define UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK 0xffffff00 | |
158 | #define UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK 0x3f | |
159 | #define UARTDM_MR1_CTS_CTL_BMSK 0x40 | |
160 | #define UARTDM_MR1_RX_RDY_CTL_BMSK 0x80 | |
161 | ||
162 | #define UARTDM_MR2_ERROR_MODE_BMSK 0x40 | |
163 | #define UARTDM_MR2_BITS_PER_CHAR_BMSK 0x30 | |
164 | ||
165 | /* bits per character configuration */ | |
166 | #define FIVE_BPC (0 << 4) | |
167 | #define SIX_BPC (1 << 4) | |
168 | #define SEVEN_BPC (2 << 4) | |
169 | #define EIGHT_BPC (3 << 4) | |
170 | ||
171 | #define UARTDM_MR2_STOP_BIT_LEN_BMSK 0xc | |
172 | #define STOP_BIT_ONE (1 << 2) | |
173 | #define STOP_BIT_TWO (3 << 2) | |
174 | ||
175 | #define UARTDM_MR2_PARITY_MODE_BMSK 0x3 | |
176 | ||
177 | /* Parity configuration */ | |
178 | #define NO_PARITY 0x0 | |
179 | #define EVEN_PARITY 0x1 | |
180 | #define ODD_PARITY 0x2 | |
181 | #define SPACE_PARITY 0x3 | |
182 | ||
183 | #define UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK 0xffffff80 | |
184 | #define UARTDM_IPR_STALE_LSB_BMSK 0x1f | |
185 | ||
186 | /* These can be used for both ISR and IMR register */ | |
187 | #define UARTDM_ISR_TX_READY_BMSK BIT(7) | |
188 | #define UARTDM_ISR_CURRENT_CTS_BMSK BIT(6) | |
189 | #define UARTDM_ISR_DELTA_CTS_BMSK BIT(5) | |
190 | #define UARTDM_ISR_RXLEV_BMSK BIT(4) | |
191 | #define UARTDM_ISR_RXSTALE_BMSK BIT(3) | |
192 | #define UARTDM_ISR_RXBREAK_BMSK BIT(2) | |
193 | #define UARTDM_ISR_RXHUNT_BMSK BIT(1) | |
194 | #define UARTDM_ISR_TXLEV_BMSK BIT(0) | |
195 | ||
196 | /* Field definitions for UART_DM_DMEN*/ | |
197 | #define UARTDM_TX_DM_EN_BMSK 0x1 | |
198 | #define UARTDM_RX_DM_EN_BMSK 0x2 | |
199 | ||
200 | #define UART_FIFOSIZE 64 | |
201 | #define UARTCLK 7372800 | |
202 | ||
/* Rx DMA request states */
enum flush_reason {
	FLUSH_NONE,		/* no flush pending; Rx DMA running normally */
	FLUSH_DATA_READY,	/* flush issued to collect received data */
	FLUSH_DATA_INVALID,	/* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,	/* result of flush is discarded */
	FLUSH_STOP,		/* flush issued while stopping the receiver */
	FLUSH_SHUTDOWN,		/* flush issued during port shutdown */
};
212 | ||
/* UART clock states, tracked in msm_hs_port.clk_state */
enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,	/* port not in use */
	MSM_HS_CLK_OFF,		/* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,	/* disable after TX and RX flushed */
	MSM_HS_CLK_ON,		/* clock enabled */
};
220 | ||
/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,		/* clock-off sequence begun */
	CLK_REQ_OFF_RXSTALE_ISSUED,	/* forced stale event written to CR */
	CLK_REQ_OFF_FLUSH_ISSUED,	/* Rx DMA flush requested */
	CLK_REQ_OFF_RXSTALE_FLUSHED,	/* flush complete; safe to clock off */
};
229 | ||
/**
 * struct msm_hs_tx
 * @tx_ready_int_en: ok to dma more tx?
 * @dma_in_flight: tx dma in progress
 * @xfer: top level DMA command pointer structure
 * @command_ptr: third level command struct pointer
 * @command_ptr_ptr: second level command list struct pointer
 * @mapped_cmd_ptr: DMA view of third level command struct
 * @mapped_cmd_ptr_ptr: DMA view of second level command list struct
 * @tx_count: number of bytes in the current DMA transfer (saved so the
 *            completion path can advance the circular buffer)
 * @dma_base: DMA view of UART xmit buffer
 *
 * This structure describes a single Tx DMA transaction. MSM DMA
 * commands have two levels of indirection. The top level command
 * ptr points to a list of command ptr which in turn points to a
 * single DMA 'command'. In our case each Tx transaction consists
 * of a single second level pointer pointing to a 'box type' command.
 */
struct msm_hs_tx {
	unsigned int tx_ready_int_en;
	unsigned int dma_in_flight;
	struct msm_dmov_cmd xfer;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	dma_addr_t mapped_cmd_ptr_ptr;
	int tx_count;
	dma_addr_t dma_base;
};
259 | ||
/**
 * struct msm_hs_rx
 * @flush: Rx DMA request state
 * @xfer: top level DMA command pointer structure
 * @cmdptr_dmaaddr: DMA view of second level command structure
 * @command_ptr: third level DMA command pointer structure
 * @command_ptr_ptr: second level DMA command list pointer
 * @mapped_cmd_ptr: DMA view of the third level command structure
 * @wait: wait for DMA completion before shutdown
 * @rbuffer: DMA view of buffer
 * @buffer: destination buffer for RX DMA
 * @pool: dma pool out of which coherent rx buffer is allocated
 * @tty_work: private work-queue for tty flip buffer push task
 *
 * This structure describes a single Rx DMA transaction. Rx DMA
 * transactions use box mode DMA commands.
 */
struct msm_hs_rx {
	enum flush_reason flush;
	struct msm_dmov_cmd xfer;
	dma_addr_t cmdptr_dmaaddr;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;
	unsigned char *buffer;
	struct dma_pool *pool;
	struct work_struct tty_work;
};
290 | ||
/**
 * struct msm_hs_rx_wakeup
 * @irq: IRQ line to be configured as interrupt source on Rx activity
 *       (a negative value means low power wakeup is disabled)
 * @ignore: boolean value. 1 = ignore the wakeup interrupt
 * @rx_to_inject: extra character to be inserted to Rx tty on wakeup
 * @inject_rx: 1 = insert rx_to_inject. 0 = do not insert extra character
 *
 * This is an optional structure required for UART Rx GPIO IRQ based
 * wakeup from low power state. UART wakeup can be triggered by RX activity
 * (using a wakeup GPIO on the UART RX pin). This should only be used if
 * there is not a wakeup GPIO on the UART CTS, and the first RX byte is
 * known (eg., with the Bluetooth Texas Instruments HCILL protocol),
 * since the first RX byte will always be lost. RTS will be asserted even
 * while the UART is clocked off in this mode of operation.
 */
struct msm_hs_rx_wakeup {
	int irq;  /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;
	unsigned char inject_rx;
	char rx_to_inject;
};
312 | ||
/**
 * struct msm_hs_port
 * @uport: embedded uart port structure
 * @imr_reg: shadow value of UARTDM_IMR
 * @clk: uart input clock handle
 * @tx: Tx transaction related data structure
 * @rx: Rx transaction related data structure
 * @dma_tx_channel: Tx DMA command channel
 * @dma_rx_channel: Rx DMA command channel
 * @dma_tx_crci: Tx channel rate control interface number
 * @dma_rx_crci: Rx channel rate control interface number
 * @clk_off_timer: Timer to poll DMA event completion before clock off
 * @clk_off_delay: clk_off_timer poll interval
 * @clk_state: overall clock state
 * @clk_req_off_state: post flush clock states
 * @rx_wakeup: optional rx_wakeup feature related data
 * @exit_lpm_cb: optional callback to exit low power mode
 *
 * Low level serial port structure.
 */
struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;
	struct clk *clk;
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;

	int dma_tx_channel;
	int dma_rx_channel;
	int dma_tx_crci;
	int dma_rx_crci;

	struct hrtimer clk_off_timer;
	ktime_t clk_off_delay;
	enum msm_hs_clk_states_e clk_state;
	enum msm_hs_clk_req_off_state_e clk_req_off_state;

	struct msm_hs_rx_wakeup rx_wakeup;
	void (*exit_lpm_cb)(struct uart_port *);
};
353 | ||
354 | #define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */ | |
355 | #define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE | |
356 | #define UARTDM_RX_BUF_SIZE 512 | |
357 | ||
358 | #define UARTDM_NR 2 | |
359 | ||
360 | static struct msm_hs_port q_uart_port[UARTDM_NR]; | |
361 | static struct platform_driver msm_serial_hs_platform_driver; | |
362 | static struct uart_driver msm_hs_driver; | |
363 | static struct uart_ops msm_hs_ops; | |
364 | static struct workqueue_struct *msm_hs_workqueue; | |
365 | ||
366 | #define UARTDM_TO_MSM(uart_port) \ | |
367 | container_of((uart_port), struct msm_hs_port, uport) | |
368 | ||
369 | static unsigned int use_low_power_rx_wakeup(struct msm_hs_port | |
370 | *msm_uport) | |
371 | { | |
372 | return (msm_uport->rx_wakeup.irq >= 0); | |
373 | } | |
374 | ||
375 | static unsigned int msm_hs_read(struct uart_port *uport, | |
376 | unsigned int offset) | |
377 | { | |
378 | return ioread32(uport->membase + offset); | |
379 | } | |
380 | ||
381 | static void msm_hs_write(struct uart_port *uport, unsigned int offset, | |
382 | unsigned int value) | |
383 | { | |
384 | iowrite32(value, uport->membase + offset); | |
385 | } | |
386 | ||
/* Standard uart_ops API: undo msm_hs_request_port() by unmapping registers */
static void msm_hs_release_port(struct uart_port *port)
{
	iounmap(port->membase);
}
391 | ||
392 | static int msm_hs_request_port(struct uart_port *port) | |
393 | { | |
394 | port->membase = ioremap(port->mapbase, PAGE_SIZE); | |
395 | if (unlikely(!port->membase)) | |
396 | return -ENOMEM; | |
397 | ||
398 | /* configure the CR Protection to Enable */ | |
399 | msm_hs_write(port, UARTDM_CR_ADDR, CR_PROTECTION_EN); | |
400 | return 0; | |
401 | } | |
402 | ||
403 | static int __devexit msm_hs_remove(struct platform_device *pdev) | |
404 | { | |
405 | ||
406 | struct msm_hs_port *msm_uport; | |
407 | struct device *dev; | |
408 | ||
409 | if (pdev->id < 0 || pdev->id >= UARTDM_NR) { | |
410 | printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id); | |
411 | return -EINVAL; | |
412 | } | |
413 | ||
414 | msm_uport = &q_uart_port[pdev->id]; | |
415 | dev = msm_uport->uport.dev; | |
416 | ||
417 | dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box), | |
418 | DMA_TO_DEVICE); | |
419 | dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer, | |
420 | msm_uport->rx.rbuffer); | |
421 | dma_pool_destroy(msm_uport->rx.pool); | |
422 | ||
423 | dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *), | |
424 | DMA_TO_DEVICE); | |
425 | dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *), | |
426 | DMA_TO_DEVICE); | |
427 | dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box), | |
428 | DMA_TO_DEVICE); | |
429 | ||
430 | uart_remove_one_port(&msm_hs_driver, &msm_uport->uport); | |
431 | clk_put(msm_uport->clk); | |
432 | ||
433 | /* Free the tx resources */ | |
434 | kfree(msm_uport->tx.command_ptr); | |
435 | kfree(msm_uport->tx.command_ptr_ptr); | |
436 | ||
437 | /* Free the rx resources */ | |
438 | kfree(msm_uport->rx.command_ptr); | |
439 | kfree(msm_uport->rx.command_ptr_ptr); | |
440 | ||
441 | iounmap(msm_uport->uport.membase); | |
442 | ||
443 | return 0; | |
444 | } | |
445 | ||
446 | static int msm_hs_init_clk_locked(struct uart_port *uport) | |
447 | { | |
448 | int ret; | |
449 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
450 | ||
451 | ret = clk_enable(msm_uport->clk); | |
452 | if (ret) { | |
453 | printk(KERN_ERR "Error could not turn on UART clk\n"); | |
454 | return ret; | |
455 | } | |
456 | ||
457 | /* Set up the MREG/NREG/DREG/MNDREG */ | |
458 | ret = clk_set_rate(msm_uport->clk, uport->uartclk); | |
459 | if (ret) { | |
460 | printk(KERN_WARNING "Error setting clock rate on UART\n"); | |
461 | clk_disable(msm_uport->clk); | |
462 | return ret; | |
463 | } | |
464 | ||
465 | msm_uport->clk_state = MSM_HS_CLK_ON; | |
466 | return 0; | |
467 | } | |
468 | ||
469 | /* Enable and Disable clocks (Used for power management) */ | |
470 | static void msm_hs_pm(struct uart_port *uport, unsigned int state, | |
471 | unsigned int oldstate) | |
472 | { | |
473 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
474 | ||
475 | if (use_low_power_rx_wakeup(msm_uport) || | |
476 | msm_uport->exit_lpm_cb) | |
477 | return; /* ignore linux PM states, | |
478 | use msm_hs_request_clock API */ | |
479 | ||
480 | switch (state) { | |
481 | case 0: | |
482 | clk_enable(msm_uport->clk); | |
483 | break; | |
484 | case 3: | |
485 | clk_disable(msm_uport->clk); | |
486 | break; | |
487 | default: | |
488 | dev_err(uport->dev, "msm_serial: Unknown PM state %d\n", | |
489 | state); | |
490 | } | |
491 | } | |
492 | ||
/*
 * programs the UARTDM_CSR register with correct bit rates
 *
 * Interrupts should be disabled before we are called, as
 * we modify Set Baud rate
 * Set receive stale interrupt level, dependent on Bit Rate
 * Goal is to have around 8 ms before indicate stale.
 * roundup (((Bit Rate * .008) / 10) + 1)
 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/*
	 * NOTE(review): each rate is programmed with the CSR code named for
	 * one quarter of that rate (e.g. 115200 -> UARTDM_CSR_28800).
	 * Presumably this matches the 7.3728 MHz UARTCLK selected below --
	 * confirm against the UARTDM register specification.
	 */
	switch (bps) {
	case 300:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_75);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_150);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_300);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_600);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_1200);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_3600);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_4800);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_7200);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_9600);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_14400);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_19200);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_28800);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_57600);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
		rxstale = 31;
		break;
	/* rates above 460800 all use the fastest CSR code; the input
	 * clock itself is scaled below (uartclk = bps * 16) */
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_115200);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, UARTDM_CSR_2400);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/* high rates need the input clock scaled for 16x oversampling */
	if (bps > 460800)
		uport->uartclk = bps * 16;
	else
		uport->uartclk = UARTCLK;

	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return;
	}

	/* stale timeout: LSB in bits 4:0, remaining bits in the MSB field */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
}
605 | ||
/*
 * Standard uart_ops API: configure the serial port (baud rate, parity,
 * character size, stop bits and hardware flow control).
 *
 * termios : new ktermios
 * oldtermios: old ktermios previous setting
 *
 * Takes the port lock and enables the port clock for the duration of
 * the register updates.
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	unsigned long flags;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	/* 300 is the minimum baud support by the driver; 200 is accepted
	 * only as an out-of-band request for 3.2 Mbps (see below) */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD))
			data |= ODD_PARITY;
		else if (CMSPAR == (c_cflag & CMSPAR))
			data |= SPACE_PARITY;
		else
			data |= EVEN_PARITY;
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* mask all interrupts while the port is reconfigured */
	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	/* an Rx DMA is in flight across the reset: mark its data as
	 * invalid and ask the data mover to flush it */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		msm_uport->rx.flush = FLUSH_IGNORE;
		msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
	}

	/* restore the shadowed interrupt mask */
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	clk_disable(msm_uport->clk);
	spin_unlock_irqrestore(&uport->lock, flags);
}
708 | ||
709 | /* | |
710 | * Standard API, Transmitter | |
711 | * Any character in the transmit shift register is sent | |
712 | */ | |
713 | static unsigned int msm_hs_tx_empty(struct uart_port *uport) | |
714 | { | |
715 | unsigned int data; | |
716 | unsigned int ret = 0; | |
717 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
718 | ||
719 | clk_enable(msm_uport->clk); | |
720 | ||
721 | data = msm_hs_read(uport, UARTDM_SR_ADDR); | |
722 | if (data & UARTDM_SR_TXEMT_BMSK) | |
723 | ret = TIOCSER_TEMT; | |
724 | ||
725 | clk_disable(msm_uport->clk); | |
726 | ||
727 | return ret; | |
728 | } | |
729 | ||
/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent as
 * well as the current data mover transfer.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Clearing this flag prevents further DMA submissions; a transfer
	 * already handed to the data mover is allowed to complete. */
	msm_uport->tx.tx_ready_int_en = 0;
}
741 | ||
742 | /* | |
743 | * Standard API, Stop receiver as soon as possible. | |
744 | * | |
745 | * Function immediately terminates the operation of the | |
746 | * channel receiver and any incoming characters are lost. None | |
747 | * of the receiver status bits are affected by this command and | |
748 | * characters that are already in the receive FIFO there. | |
749 | */ | |
750 | static void msm_hs_stop_rx_locked(struct uart_port *uport) | |
751 | { | |
752 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
753 | unsigned int data; | |
754 | ||
755 | clk_enable(msm_uport->clk); | |
756 | ||
757 | /* disable dlink */ | |
758 | data = msm_hs_read(uport, UARTDM_DMEN_ADDR); | |
759 | data &= ~UARTDM_RX_DM_EN_BMSK; | |
760 | msm_hs_write(uport, UARTDM_DMEN_ADDR, data); | |
761 | ||
762 | /* Disable the receiver */ | |
763 | if (msm_uport->rx.flush == FLUSH_NONE) | |
764 | msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1); | |
765 | ||
766 | if (msm_uport->rx.flush != FLUSH_SHUTDOWN) | |
767 | msm_uport->rx.flush = FLUSH_STOP; | |
768 | ||
769 | clk_disable(msm_uport->clk); | |
770 | } | |
771 | ||
/* Transmit the next chunk of data: build and enqueue one box-mode DMA
 * command covering the contiguous bytes at the tail of the xmit buffer. */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	dma_addr_t src_addr;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;

	/* nothing to send, or tty flow-stopped: disable further submits */
	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	tx_count = uart_circ_chars_pending(tx_buf);

	/* clamp to the DMA buffer size ... */
	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	/* ... and to the contiguous run before the circular-buffer wrap */
	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* make the xmit bytes visible to the DMA engine */
	dma_sync_single_for_device(uport->dev, src_addr, tx_count,
				   DMA_TO_DEVICE);

	/* num_rows packs the 16-byte row count into both halves of the word */
	tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) |
				    ((tx_count + 15) >> 4);
	tx->command_ptr->src_row_addr = src_addr;

	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
				   sizeof(dmov_box), DMA_TO_DEVICE);

	*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);

	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
				   sizeof(u32 *), DMA_TO_DEVICE);

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt while the DMA is in flight;
	 * the completion callback re-enables it. */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
}
824 | ||
/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* clear any stale interrupt, program the maximum transfer length,
	 * and re-arm stale-event detection before handing off to DMA */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	/* mark the Rx DMA as live, then enqueue the prepared command */
	msm_uport->rx.flush = FLUSH_NONE;
	msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer);

	/* might have finished RX and be ready to clock off */
	hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay,
		      HRTIMER_MODE_REL);
}
843 | ||
844 | /* Enable the transmitter Interrupt */ | |
845 | static void msm_hs_start_tx_locked(struct uart_port *uport) | |
846 | { | |
847 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
848 | ||
849 | clk_enable(msm_uport->clk); | |
850 | ||
851 | if (msm_uport->exit_lpm_cb) | |
852 | msm_uport->exit_lpm_cb(uport); | |
853 | ||
854 | if (msm_uport->tx.tx_ready_int_en == 0) { | |
855 | msm_uport->tx.tx_ready_int_en = 1; | |
856 | msm_hs_submit_tx_locked(uport); | |
857 | } | |
858 | ||
859 | clk_disable(msm_uport->clk); | |
860 | } | |
861 | ||
/*
 * This routine is called when we are done with a DMA transfer
 *
 * This routine is registered with Data mover when we set
 * up a Data Mover transfer. It is called from Data mover ISR
 * when the DMA transfer is done.
 */
static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport;

	/* DMA did not finish properly */
	WARN_ON((((result & RSLT_FIFO_CNTR_BMSK) >> 28) == 1) &&
		!(result & RSLT_VLD));

	msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_enable(msm_uport->clk);

	/* re-enable the tx_ready interrupt (masked while the DMA was in
	 * flight by msm_hs_submit_tx_locked) so the UART ISR can finish
	 * the transfer */
	msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	clk_disable(msm_uport->clk);
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
}
891 | ||
892 | /* | |
893 | * This routine is called when we are done with a DMA transfer or the | |
894 | * a flush has been sent to the data mover driver. | |
895 | * | |
896 | * This routine is registered with Data mover when we set up a Data Mover | |
897 | * transfer. It is called from Data mover ISR when the DMA transfer is done. | |
898 | */ | |
899 | static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr, | |
900 | unsigned int result, | |
901 | struct msm_dmov_errdata *err) | |
902 | { | |
903 | int retval; | |
904 | int rx_count; | |
905 | unsigned long status; | |
906 | unsigned int error_f = 0; | |
907 | unsigned long flags; | |
908 | unsigned int flush; | |
909 | struct tty_struct *tty; | |
910 | struct uart_port *uport; | |
911 | struct msm_hs_port *msm_uport; | |
912 | ||
913 | msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer); | |
914 | uport = &msm_uport->uport; | |
915 | ||
916 | spin_lock_irqsave(&uport->lock, flags); | |
917 | clk_enable(msm_uport->clk); | |
918 | ||
919 | tty = uport->state->port.tty; | |
920 | ||
921 | msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE); | |
922 | ||
923 | status = msm_hs_read(uport, UARTDM_SR_ADDR); | |
924 | ||
925 | /* overflow is not connect to data in a FIFO */ | |
926 | if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) && | |
927 | (uport->read_status_mask & CREAD))) { | |
928 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | |
929 | uport->icount.buf_overrun++; | |
930 | error_f = 1; | |
931 | } | |
932 | ||
933 | if (!(uport->ignore_status_mask & INPCK)) | |
934 | status = status & ~(UARTDM_SR_PAR_FRAME_BMSK); | |
935 | ||
936 | if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) { | |
937 | /* Can not tell difference between parity & frame error */ | |
938 | uport->icount.parity++; | |
939 | error_f = 1; | |
940 | if (uport->ignore_status_mask & IGNPAR) | |
941 | tty_insert_flip_char(tty, 0, TTY_PARITY); | |
942 | } | |
943 | ||
944 | if (error_f) | |
945 | msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS); | |
946 | ||
947 | if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED) | |
948 | msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED; | |
949 | ||
950 | flush = msm_uport->rx.flush; | |
951 | if (flush == FLUSH_IGNORE) | |
952 | msm_hs_start_rx_locked(uport); | |
953 | if (flush == FLUSH_STOP) | |
954 | msm_uport->rx.flush = FLUSH_SHUTDOWN; | |
955 | if (flush >= FLUSH_DATA_INVALID) | |
956 | goto out; | |
957 | ||
958 | rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR); | |
959 | ||
960 | if (0 != (uport->read_status_mask & CREAD)) { | |
961 | retval = tty_insert_flip_string(tty, msm_uport->rx.buffer, | |
962 | rx_count); | |
963 | BUG_ON(retval != rx_count); | |
964 | } | |
965 | ||
966 | msm_hs_start_rx_locked(uport); | |
967 | ||
968 | out: | |
969 | clk_disable(msm_uport->clk); | |
970 | ||
971 | spin_unlock_irqrestore(&uport->lock, flags); | |
972 | ||
973 | if (flush < FLUSH_DATA_INVALID) | |
974 | queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work); | |
975 | } | |
976 | ||
977 | static void msm_hs_tty_flip_buffer_work(struct work_struct *work) | |
978 | { | |
979 | struct msm_hs_port *msm_uport = | |
980 | container_of(work, struct msm_hs_port, rx.tty_work); | |
981 | struct tty_struct *tty = msm_uport->uport.state->port.tty; | |
982 | ||
983 | tty_flip_buffer_push(tty); | |
984 | } | |
985 | ||
986 | /* | |
987 | * Standard API, Current states of modem control inputs | |
988 | * | |
989 | * Since CTS can be handled entirely by HARDWARE we always | |
990 | * indicate clear to send and count on the TX FIFO to block when | |
991 | * it fills up. | |
992 | * | |
993 | * - TIOCM_DCD | |
994 | * - TIOCM_CTS | |
995 | * - TIOCM_DSR | |
996 | * - TIOCM_RI | |
997 | * (Unsupported) DCD and DSR will return them high. RI will return low. | |
998 | */ | |
999 | static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport) | |
1000 | { | |
1001 | return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS; | |
1002 | } | |
1003 | ||
1004 | /* | |
1005 | * True enables UART auto RFR, which indicates we are ready for data if the RX | |
1006 | * buffer is not full. False disables auto RFR, and deasserts RFR to indicate | |
1007 | * we are not ready for data. Must be called with UART clock on. | |
1008 | */ | |
1009 | static void set_rfr_locked(struct uart_port *uport, int auto_rfr) | |
1010 | { | |
1011 | unsigned int data; | |
1012 | ||
1013 | data = msm_hs_read(uport, UARTDM_MR1_ADDR); | |
1014 | ||
1015 | if (auto_rfr) { | |
1016 | /* enable auto ready-for-receiving */ | |
1017 | data |= UARTDM_MR1_RX_RDY_CTL_BMSK; | |
1018 | msm_hs_write(uport, UARTDM_MR1_ADDR, data); | |
1019 | } else { | |
1020 | /* disable auto ready-for-receiving */ | |
1021 | data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK; | |
1022 | msm_hs_write(uport, UARTDM_MR1_ADDR, data); | |
1023 | /* RFR is active low, set high */ | |
1024 | msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH); | |
1025 | } | |
1026 | } | |
1027 | ||
1028 | /* | |
1029 | * Standard API, used to set or clear RFR | |
1030 | */ | |
1031 | static void msm_hs_set_mctrl_locked(struct uart_port *uport, | |
1032 | unsigned int mctrl) | |
1033 | { | |
1034 | unsigned int auto_rfr; | |
1035 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
1036 | ||
1037 | clk_enable(msm_uport->clk); | |
1038 | ||
1039 | auto_rfr = TIOCM_RTS & mctrl ? 1 : 0; | |
1040 | set_rfr_locked(uport, auto_rfr); | |
1041 | ||
1042 | clk_disable(msm_uport->clk); | |
1043 | } | |
1044 | ||
1045 | /* Standard API, Enable modem status (CTS) interrupt */ | |
1046 | static void msm_hs_enable_ms_locked(struct uart_port *uport) | |
1047 | { | |
1048 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
1049 | ||
1050 | clk_enable(msm_uport->clk); | |
1051 | ||
1052 | /* Enable DELTA_CTS Interrupt */ | |
1053 | msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK; | |
1054 | msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); | |
1055 | ||
1056 | clk_disable(msm_uport->clk); | |
1057 | ||
1058 | } | |
1059 | ||
1060 | /* | |
1061 | * Standard API, Break Signal | |
1062 | * | |
1063 | * Control the transmission of a break signal. ctl eq 0 => break | |
1064 | * signal terminate ctl ne 0 => start break signal | |
1065 | */ | |
1066 | static void msm_hs_break_ctl(struct uart_port *uport, int ctl) | |
1067 | { | |
1068 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
1069 | ||
1070 | clk_enable(msm_uport->clk); | |
1071 | msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK); | |
1072 | clk_disable(msm_uport->clk); | |
1073 | } | |
1074 | ||
/*
 * Standard uart_ops.config_port hook: when autoconfiguration is
 * requested, claim the port type and its memory resource.
 * The port lock is taken around the whole check so the type update and
 * resource request are atomic with respect to other port operations.
 */
static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&uport->lock, flags);
	if (cfg_flags & UART_CONFIG_TYPE) {
		uport->type = PORT_MSM;
		msm_hs_request_port(uport);
	}
	spin_unlock_irqrestore(&uport->lock, flags);
}
1086 | ||
1087 | /* Handle CTS changes (Called from interrupt handler) */ | |
1088 | static void msm_hs_handle_delta_cts(struct uart_port *uport) | |
1089 | { | |
1090 | unsigned long flags; | |
1091 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
1092 | ||
1093 | spin_lock_irqsave(&uport->lock, flags); | |
1094 | clk_enable(msm_uport->clk); | |
1095 | ||
1096 | /* clear interrupt */ | |
1097 | msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS); | |
1098 | uport->icount.cts++; | |
1099 | ||
1100 | clk_disable(msm_uport->clk); | |
1101 | spin_unlock_irqrestore(&uport->lock, flags); | |
1102 | ||
1103 | /* clear the IOCTL TIOCMIWAIT if called */ | |
1104 | wake_up_interruptible(&uport->state->port.delta_msr_wait); | |
1105 | } | |
1106 | ||
/* check if the TX path is flushed, and if so clock off
 * returns 0 did not clock off, need to retry (still sending final byte)
 * -1 did not clock off, do not retry
 * 1 if we clocked off
 *
 * Drives the CLK_REQ_OFF_* handshake: force an RXSTALE event, wait for
 * the resulting RX flush to complete, then stop the clock and (if
 * configured) arm the RX-wakeup GPIO interrupt. Must be called with the
 * port lock held and clk_state already MSM_HS_CLK_REQUEST_OFF.
 */
static int msm_hs_check_clock_off_locked(struct uart_port *uport)
{
	unsigned long sr_status;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;

	/* Cancel if tx tty buffer is not empty, dma is in flight,
	 * or tx fifo is not empty, or rx fifo is not empty */
	if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
	    !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
	    (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) ||
	    !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) {
		return -1;
	}

	/* Make sure the uart is finished with the last byte */
	sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (!(sr_status & UARTDM_SR_TXEMT_BMSK))
		return 0;  /* retry */

	/* Make sure forced RXSTALE flush complete */
	switch (msm_uport->clk_req_off_state) {
	case CLK_REQ_OFF_START:
		/* kick off the handshake: force a stale event so the RX
		 * DMA callback runs and advances the state machine */
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
		msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
		return 0;  /* RXSTALE flush not complete - retry */
	case CLK_REQ_OFF_RXSTALE_ISSUED:
	case CLK_REQ_OFF_FLUSH_ISSUED:
		return 0;  /* RXSTALE flush not complete - retry */
	case CLK_REQ_OFF_RXSTALE_FLUSHED:
		break;  /* continue */
	}

	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
		if (msm_uport->rx.flush == FLUSH_NONE)
			msm_hs_stop_rx_locked(uport);
		return 0;  /* come back later to really clock off */
	}

	/* we really want to clock off */
	clk_disable(msm_uport->clk);
	msm_uport->clk_state = MSM_HS_CLK_OFF;

	if (use_low_power_rx_wakeup(msm_uport)) {
		/* first wakeup irq is a stale pending one - ignore it */
		msm_uport->rx_wakeup.ignore = 1;
		enable_irq(msm_uport->rx_wakeup.irq);
	}
	return 1;
}
1161 | ||
1162 | static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer) | |
1163 | { | |
1164 | unsigned long flags; | |
1165 | int ret = HRTIMER_NORESTART; | |
1166 | struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port, | |
1167 | clk_off_timer); | |
1168 | struct uart_port *uport = &msm_uport->uport; | |
1169 | ||
1170 | spin_lock_irqsave(&uport->lock, flags); | |
1171 | ||
1172 | if (!msm_hs_check_clock_off_locked(uport)) { | |
1173 | hrtimer_forward_now(timer, msm_uport->clk_off_delay); | |
1174 | ret = HRTIMER_RESTART; | |
1175 | } | |
1176 | ||
1177 | spin_unlock_irqrestore(&uport->lock, flags); | |
1178 | ||
1179 | return ret; | |
1180 | } | |
1181 | ||
/*
 * Main UART interrupt handler.
 *
 * Demultiplexes the masked interrupt status: RXLEV (RX DMA has started),
 * RXSTALE (end of an RX burst), TX_READY (TX DMA retired), TXLEV (TX FIFO
 * drained, used by the clock-off path) and DELTA_CTS.
 */
static irqreturn_t msm_hs_isr(int irq, void *dev)
{
	unsigned long flags;
	unsigned long isr_status;
	struct msm_hs_port *msm_uport = dev;
	struct uart_port *uport = &msm_uport->uport;
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	spin_lock_irqsave(&uport->lock, flags);

	isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);

	/* Uart RX starting */
	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
		/* one-shot: disarm until the next RX transfer is queued */
		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	}
	/* Stale rx interrupt */
	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
		msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);

		/* advance the clock-off handshake if one is in progress */
		if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED)
			msm_uport->clk_req_off_state =
					CLK_REQ_OFF_FLUSH_ISSUED;
		if (rx->flush == FLUSH_NONE) {
			/* flush the RX DMA so the callback delivers what
			 * has arrived so far */
			rx->flush = FLUSH_DATA_READY;
			msm_dmov_stop_cmd(msm_uport->dma_rx_channel, NULL, 1);
		}
	}
	/* tx ready interrupt */
	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
		/* Clear TX Ready */
		msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);

		if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
			/* watch for the TX FIFO draining so we can clock off */
			msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
			msm_hs_write(uport, UARTDM_IMR_ADDR,
				     msm_uport->imr_reg);
		}

		/* Complete DMA TX transactions and submit new transactions */
		/* NOTE(review): `& ~UART_XMIT_SIZE` only wraps correctly
		 * because tail + tx_count never exceeds UART_XMIT_SIZE (a
		 * power of two); the conventional idiom is
		 * `& (UART_XMIT_SIZE - 1)` - confirm before reuse */
		tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE;

		tx->dma_in_flight = 0;

		uport->icount.tx += tx->tx_count;
		if (tx->tx_ready_int_en)
			msm_hs_submit_tx_locked(uport);

		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
			uart_write_wakeup(uport);
	}
	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
		/* TX FIFO is empty */
		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
		/* retry the clock-off sequence later if it isn't done yet */
		if (!msm_hs_check_clock_off_locked(uport))
			hrtimer_start(&msm_uport->clk_off_timer,
				      msm_uport->clk_off_delay,
				      HRTIMER_MODE_REL);
	}

	/* Change in CTS interrupt */
	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
		msm_hs_handle_delta_cts(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
1255 | ||
1256 | void msm_hs_request_clock_off_locked(struct uart_port *uport) | |
1257 | { | |
1258 | struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport); | |
1259 | ||
1260 | if (msm_uport->clk_state == MSM_HS_CLK_ON) { | |
1261 | msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF; | |
1262 | msm_uport->clk_req_off_state = CLK_REQ_OFF_START; | |
1263 | if (!use_low_power_rx_wakeup(msm_uport)) | |
1264 | set_rfr_locked(uport, 0); | |
1265 | msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK; | |
1266 | msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg); | |
1267 | } | |
1268 | } | |
1269 | ||
1270 | /** | |
1271 | * msm_hs_request_clock_off - request to (i.e. asynchronously) turn off uart | |
1272 | * clock once pending TX is flushed and Rx DMA command is terminated. | |
1273 | * @uport: uart_port structure for the device instance. | |
1274 | * | |
1275 | * This functions puts the device into a partially active low power mode. It | |
1276 | * waits to complete all pending tx transactions, flushes ongoing Rx DMA | |
1277 | * command and terminates UART side Rx transaction, puts UART HW in non DMA | |
1278 | * mode and then clocks off the device. A client calls this when no UART | |
1279 | * data is expected. msm_request_clock_on() must be called before any further | |
1280 | * UART can be sent or received. | |
1281 | */ | |
1282 | void msm_hs_request_clock_off(struct uart_port *uport) | |
1283 | { | |
1284 | unsigned long flags; | |
1285 | ||
1286 | spin_lock_irqsave(&uport->lock, flags); | |
1287 | msm_hs_request_clock_off_locked(uport); | |
1288 | spin_unlock_irqrestore(&uport->lock, flags); | |
1289 | } | |
1290 | ||
/*
 * Turn the UART clock back on (caller holds the port lock).
 *
 * From MSM_HS_CLK_OFF the clock is re-enabled and the RX-wakeup irq is
 * masked again, then execution falls through into the REQUEST_OFF
 * recovery path, which re-enables RX DMA, cancels the pending clock-off
 * timer and restarts RX as needed. MSM_HS_CLK_ON and MSM_HS_CLK_PORT_OFF
 * are no-ops.
 */
void msm_hs_request_clock_on_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	switch (msm_uport->clk_state) {
	case MSM_HS_CLK_OFF:
		clk_enable(msm_uport->clk);
		disable_irq_nosync(msm_uport->rx_wakeup.irq);
		/* fall-through */
	case MSM_HS_CLK_REQUEST_OFF:
		if (msm_uport->rx.flush == FLUSH_STOP ||
		    msm_uport->rx.flush == FLUSH_SHUTDOWN) {
			/* RX was torn down for clock-off; reset it and
			 * put the receiver back into data mover mode */
			msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
			data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
			data |= UARTDM_RX_DM_EN_BMSK;
			msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
		}
		hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
		if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
			msm_hs_start_rx_locked(uport);
		if (!use_low_power_rx_wakeup(msm_uport))
			set_rfr_locked(uport, 1);
		if (msm_uport->rx.flush == FLUSH_STOP)
			msm_uport->rx.flush = FLUSH_IGNORE;
		msm_uport->clk_state = MSM_HS_CLK_ON;
		break;
	case MSM_HS_CLK_ON:
		break;
	case MSM_HS_CLK_PORT_OFF:
		break;
	}
}
1324 | ||
1325 | /** | |
1326 | * msm_hs_request_clock_on - Switch the device from partially active low | |
1327 | * power mode to fully active (i.e. clock on) mode. | |
1328 | * @uport: uart_port structure for the device. | |
1329 | * | |
1330 | * This function switches on the input clock, puts UART HW into DMA mode | |
1331 | * and enqueues an Rx DMA command if the device was in partially active | |
1332 | * mode. It has no effect if called with the device in inactive state. | |
1333 | */ | |
1334 | void msm_hs_request_clock_on(struct uart_port *uport) | |
1335 | { | |
1336 | unsigned long flags; | |
1337 | ||
1338 | spin_lock_irqsave(&uport->lock, flags); | |
1339 | msm_hs_request_clock_on_locked(uport); | |
1340 | spin_unlock_irqrestore(&uport->lock, flags); | |
1341 | } | |
1342 | ||
/*
 * Interrupt handler for the RX-wakeup GPIO.
 *
 * Fires on RX line activity while the UART clock is off. The first irq
 * after arming is a stale pending one and is discarded via the `ignore`
 * flag; a genuine wakeup clocks the UART back on and, if configured,
 * injects the known-lost first byte into the tty (see file header on the
 * HCILL-style protocols this supports).
 */
static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);
	if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
		/* ignore the first irq - it is a pending irq that occurred
		 * before enable_irq() */
		if (msm_uport->rx_wakeup.ignore)
			msm_uport->rx_wakeup.ignore = 0;
		else
			wakeup = 1;
	}

	if (wakeup) {
		/* the uart was clocked off during an rx, wake up and
		 * optionally inject char into tty rx */
		msm_hs_request_clock_on_locked(uport);
		if (msm_uport->rx_wakeup.inject_rx) {
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty,
					     msm_uport->rx_wakeup.rx_to_inject,
					     TTY_NORMAL);
			queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
		}
	}

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
1378 | ||
1379 | static const char *msm_hs_type(struct uart_port *port) | |
1380 | { | |
1381 | return (port->type == PORT_MSM) ? "MSM_HS_UART" : NULL; | |
1382 | } | |
1383 | ||
/*
 * Called when port is opened.
 *
 * Maps the TX circular buffer for DMA, brings up the UART clock,
 * programs flow-control levels and data mover mode, resets the hardware
 * state, wires up the TX/RX data mover descriptors, requests the UART
 * (and optional RX-wakeup) interrupts and finally starts the first RX
 * DMA transfer.
 */
static int msm_hs_startup(struct uart_port *uport)
{
	int ret;
	int rfr_level;
	unsigned long flags;
	unsigned int data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	/* deassert RFR 16 bytes before the FIFO fills (when it can) */
	rfr_level = uport->fifosize;
	if (rfr_level > 16)
		rfr_level -= 16;

	/* NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() - confirm acceptable on target platforms */
	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
				      DMA_TO_DEVICE);

	/* do not let tty layer execute RX in global workqueue, use a
	 * dedicated workqueue managed by this driver */
	uport->state->port.tty->low_latency = 1;

	/* turn on uart clk */
	ret = msm_hs_init_clk_locked(uport);
	if (unlikely(ret)) {
		printk(KERN_ERR "Turning uartclk failed!\n");
		goto err_msm_hs_init_clk;
	}

	/* Set auto RFR Level */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	/* Make sure RXSTALE count is non-zero */
	data = msm_hs_read(uport, UARTDM_IPR_ADDR);
	if (!data) {
		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
		msm_hs_write(uport, UARTDM_IPR_ADDR, data);
	}

	/* Enable Data Mover Mode */
	data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* Reset TX, RX, error status, break/stale interrupts and CTS */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
	msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
	/* Turn on Uart Receiver */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);

	/* Turn on Uart Transmitter */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);

	/* Initialize the tx */
	tx->tx_ready_int_en = 0;
	tx->dma_in_flight = 0;

	/* data mover descriptor for TX: box transfer into the TX FIFO */
	tx->xfer.complete_func = msm_hs_dmov_tx_callback;
	tx->xfer.execute_func = NULL;

	tx->command_ptr->cmd = CMD_LC |
	    CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;

	tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
				       | (MSM_UARTDM_BURST_SIZE);

	tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);

	tx->command_ptr->dst_row_addr =
	    msm_uport->uport.mapbase + UARTDM_TF_ADDR;


	/* data mover descriptor for RX: box transfer out of the RX FIFO */
	rx->xfer.complete_func = msm_hs_dmov_rx_callback;
	rx->xfer.execute_func = NULL;

	rx->command_ptr->cmd = CMD_LC |
	    CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;

	rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
				       | (MSM_UARTDM_BURST_SIZE);
	rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
	rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;


	msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
	/* Enable reading the current CTS, no harm even if CTS is ignored */
	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;

	msm_hs_write(uport, UARTDM_TFWR_ADDR, 0);  /* TXLEV on empty TX fifo */


	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
			  "msm_hs_uart", msm_uport);
	if (unlikely(ret)) {
		printk(KERN_ERR "Request msm_hs_uart IRQ failed!\n");
		goto err_request_irq;
	}
	if (use_low_power_rx_wakeup(msm_uport)) {
		/* wakeup irq stays disabled until the clock is turned off */
		ret = request_irq(msm_uport->rx_wakeup.irq,
				  msm_hs_rx_wakeup_isr,
				  IRQF_TRIGGER_FALLING,
				  "msm_hs_rx_wakeup", msm_uport);
		if (unlikely(ret)) {
			printk(KERN_ERR "Request msm_hs_rx_wakeup IRQ failed!\n");
			free_irq(uport->irq, msm_uport);
			goto err_request_irq;
		}
		disable_irq(msm_uport->rx_wakeup.irq);
	}

	spin_lock_irqsave(&uport->lock, flags);

	msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
	msm_hs_start_rx_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);
	ret = pm_runtime_set_active(uport->dev);
	if (ret)
		dev_err(uport->dev, "set active error:%d\n", ret);
	pm_runtime_enable(uport->dev);

	return 0;

err_request_irq:
err_msm_hs_init_clk:
	dma_unmap_single(uport->dev, tx->dma_base,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	return ret;
}
1524 | ||
/*
 * Initialize tx and rx data structures.
 *
 * Allocates and DMA-maps the data mover command descriptors for both
 * directions and the RX bounce buffer (from a dedicated DMA pool).
 * On failure, unwinds all prior allocations via the goto chain.
 */
static int __devinit uartdm_init_port(struct uart_port *uport)
{
	int ret = 0;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	/* Allocate the command pointer. Needs to be 64 bit aligned */
	tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
	if (!tx->command_ptr)
		return -ENOMEM;

	/* single-entry command-pointer list for the data mover */
	tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
	if (!tx->command_ptr_ptr) {
		ret = -ENOMEM;
		goto err_tx_command_ptr_ptr;
	}

	/* NOTE(review): dma_map_single() results below are never checked
	 * with dma_mapping_error() - confirm acceptable for this platform */
	tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
					    sizeof(dmov_box), DMA_TO_DEVICE);
	tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
						tx->command_ptr_ptr,
						sizeof(u32 *), DMA_TO_DEVICE);
	tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);

	init_waitqueue_head(&rx->wait);

	/* 16-byte aligned pool for the RX DMA bounce buffer */
	rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
				   UARTDM_RX_BUF_SIZE, 16, 0);
	if (!rx->pool) {
		pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
		ret = -ENOMEM;
		goto err_dma_pool_create;
	}

	rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
	if (!rx->buffer) {
		pr_err("%s(): cannot allocate rx->buffer", __func__);
		ret = -ENOMEM;
		goto err_dma_pool_alloc;
	}

	/* Allocate the command pointer. Needs to be 64 bit aligned */
	rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
	if (!rx->command_ptr) {
		pr_err("%s(): cannot allocate rx->command_ptr", __func__);
		ret = -ENOMEM;
		goto err_rx_command_ptr;
	}

	rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
	if (!rx->command_ptr_ptr) {
		pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
		ret = -ENOMEM;
		goto err_rx_command_ptr_ptr;
	}

	/* box transfer: rows of 16 bytes covering the whole RX buffer */
	rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
					 (UARTDM_RX_BUF_SIZE >> 4);

	rx->command_ptr->dst_row_addr = rx->rbuffer;

	rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
					    sizeof(dmov_box), DMA_TO_DEVICE);

	/* last-pointer entry referencing the mapped box command */
	*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);

	rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
					    sizeof(u32 *), DMA_TO_DEVICE);
	rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);

	INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);

	return ret;

err_rx_command_ptr_ptr:
	kfree(rx->command_ptr);
err_rx_command_ptr:
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
err_dma_pool_alloc:
	dma_pool_destroy(msm_uport->rx.pool);
err_dma_pool_create:
	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
			 sizeof(u32 *), DMA_TO_DEVICE);
	dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
			 sizeof(dmov_box), DMA_TO_DEVICE);
	kfree(msm_uport->tx.command_ptr_ptr);
err_tx_command_ptr_ptr:
	kfree(msm_uport->tx.command_ptr);
	return ret;
}
1618 | ||
/*
 * Platform driver probe: gather memory, IRQ and DMA resources from the
 * platform device, configure the optional RX-wakeup GPIO irq from
 * platform data, initialize the DMA structures and register the port
 * with the serial core.
 */
static int __devinit msm_hs_probe(struct platform_device *pdev)
{
	int ret;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	struct resource *resource;
	const struct msm_serial_hs_platform_data *pdata =
						pdev->dev.platform_data;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	uport = &msm_uport->uport;

	uport->dev = &pdev->dev;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!resource))
		return -ENXIO;

	uport->mapbase = resource->start;
	uport->irq = platform_get_irq(pdev, 0);
	if (unlikely(uport->irq < 0))
		return -ENXIO;

	/* the UART irq must be able to wake the system from suspend */
	if (unlikely(irq_set_irq_wake(uport->irq, 1)))
		return -ENXIO;

	if (pdata == NULL || pdata->rx_wakeup_irq < 0)
		msm_uport->rx_wakeup.irq = -1;
	else {
		msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq;
		/* first wakeup irq after arming is stale - discard it */
		msm_uport->rx_wakeup.ignore = 1;
		msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup;
		msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject;

		/* unreachable: the outer else-branch already guarantees
		 * rx_wakeup_irq >= 0 here; kept as defensive residue */
		if (unlikely(msm_uport->rx_wakeup.irq < 0))
			return -ENXIO;

		if (unlikely(irq_set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
			return -ENXIO;
	}

	if (pdata == NULL)
		msm_uport->exit_lpm_cb = NULL;
	else
		msm_uport->exit_lpm_cb = pdata->exit_lpm_cb;

	/* TX channel in resource->start, RX channel in resource->end */
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						"uartdm_channels");
	if (unlikely(!resource))
		return -ENXIO;

	msm_uport->dma_tx_channel = resource->start;
	msm_uport->dma_rx_channel = resource->end;

	/* same start/end convention for the CRCI values */
	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
						"uartdm_crci");
	if (unlikely(!resource))
		return -ENXIO;

	msm_uport->dma_tx_crci = resource->start;
	msm_uport->dma_rx_crci = resource->end;

	uport->iotype = UPIO_MEM;
	uport->fifosize = UART_FIFOSIZE;
	uport->ops = &msm_hs_ops;
	uport->flags = UPF_BOOT_AUTOCONF;
	uport->uartclk = UARTCLK;
	msm_uport->imr_reg = 0x0;
	msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk");
	if (IS_ERR(msm_uport->clk))
		return PTR_ERR(msm_uport->clk);

	ret = uartdm_init_port(uport);
	if (unlikely(ret))
		return ret;

	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
	hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
	msm_uport->clk_off_delay = ktime_set(0, 1000000);  /* 1ms */

	uport->line = pdev->id;
	return uart_add_one_port(&msm_hs_driver, uport);
}
1709 | ||
1710 | static int __init msm_serial_hs_init(void) | |
1711 | { | |
1712 | int ret, i; | |
1713 | ||
1714 | /* Init all UARTS as non-configured */ | |
1715 | for (i = 0; i < UARTDM_NR; i++) | |
1716 | q_uart_port[i].uport.type = PORT_UNKNOWN; | |
1717 | ||
1718 | msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs"); | |
1719 | if (unlikely(!msm_hs_workqueue)) | |
1720 | return -ENOMEM; | |
1721 | ||
1722 | ret = uart_register_driver(&msm_hs_driver); | |
1723 | if (unlikely(ret)) { | |
1724 | printk(KERN_ERR "%s failed to load\n", __func__); | |
1725 | goto err_uart_register_driver; | |
1726 | } | |
1727 | ||
1728 | ret = platform_driver_register(&msm_serial_hs_platform_driver); | |
1729 | if (ret) { | |
1730 | printk(KERN_ERR "%s failed to load\n", __func__); | |
1731 | goto err_platform_driver_register; | |
1732 | } | |
1733 | ||
1734 | return ret; | |
1735 | ||
1736 | err_platform_driver_register: | |
1737 | uart_unregister_driver(&msm_hs_driver); | |
1738 | err_uart_register_driver: | |
1739 | destroy_workqueue(msm_hs_workqueue); | |
1740 | return ret; | |
1741 | } | |
1742 | module_init(msm_serial_hs_init); | |
1743 | ||
/*
 * Called by the upper layer when port is closed.
 * - Disables the port
 * - Unhook the ISR
 */
static void msm_hs_shutdown(struct uart_port *uport)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* RX DMA flush must already have reached at least FLUSH_STOP. */
	BUG_ON(msm_uport->rx.flush < FLUSH_STOP);

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	/* Disable the transmitter */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
	/* Disable the receiver */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);

	pm_runtime_disable(uport->dev);
	pm_runtime_set_suspended(uport->dev);

	/*
	 * Free the interrupt
	 *
	 * FIXME(review): free_irq() can sleep, yet we are still inside
	 * spin_lock_irqsave() taken above — sleeping in atomic context.
	 */
	free_irq(uport->irq, msm_uport);
	if (use_low_power_rx_wakeup(msm_uport))
		free_irq(msm_uport->rx_wakeup.irq, msm_uport);

	/* Mask every UARTDM interrupt source. */
	msm_uport->imr_reg = 0;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	/*
	 * FIXME(review): wait_event() sleeps until the RX flush state
	 * reaches FLUSH_SHUTDOWN, but the spinlock is still held here;
	 * this should be restructured so the lock is dropped first.
	 */
	wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);

	clk_disable(msm_uport->clk);  /* to balance local clk_enable() */
	if (msm_uport->clk_state != MSM_HS_CLK_OFF)
		clk_disable(msm_uport->clk);  /* to balance clk_state */
	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;

	/* Unmap the TX DMA buffer that startup mapped for the xmit ring. */
	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&uport->lock, flags);

	/* If flip work was pending, run it once synchronously after cancel. */
	if (cancel_work_sync(&msm_uport->rx.tty_work))
		msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work);
}
1790 | ||
1791 | static void __exit msm_serial_hs_exit(void) | |
1792 | { | |
1793 | flush_workqueue(msm_hs_workqueue); | |
1794 | destroy_workqueue(msm_hs_workqueue); | |
1795 | platform_driver_unregister(&msm_serial_hs_platform_driver); | |
1796 | uart_unregister_driver(&msm_hs_driver); | |
1797 | } | |
1798 | module_exit(msm_serial_hs_exit); | |
1799 | ||
#ifdef CONFIG_PM_RUNTIME
/*
 * Runtime-PM idle callback.  Returning 0 tells the PM core to go ahead
 * and invoke the runtime suspend callback.
 */
static int msm_hs_runtime_idle(struct device *dev)
{
	return 0;
}

/* Runtime-PM resume: turn the port clock back on. */
static int msm_hs_runtime_resume(struct device *dev)
{
	/* to_platform_device() is the canonical form of this container_of */
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	msm_hs_request_clock_on(&msm_uport->uport);
	return 0;
}

/* Runtime-PM suspend: request the port clock off. */
static int msm_hs_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	msm_hs_request_clock_off(&msm_uport->uport);
	return 0;
}
#else
#define msm_hs_runtime_idle NULL
#define msm_hs_runtime_resume NULL
#define msm_hs_runtime_suspend NULL
#endif
1834 | ||
/*
 * Runtime-PM hooks for the platform device.  When CONFIG_PM_RUNTIME is
 * disabled these resolve to NULL via the #define stubs above.
 */
static const struct dev_pm_ops msm_hs_dev_pm_ops = {
	.runtime_suspend = msm_hs_runtime_suspend,
	.runtime_resume = msm_hs_runtime_resume,
	.runtime_idle = msm_hs_runtime_idle,
};
1840 | ||
/* Platform driver glue; matched by device name "msm_serial_hs". */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = __devexit_p(msm_hs_remove),
	.driver = {
		.name = "msm_serial_hs",
		.owner = THIS_MODULE,
		.pm = &msm_hs_dev_pm_ops,
	},
};
1850 | ||
1851 | static struct uart_driver msm_hs_driver = { | |
1852 | .owner = THIS_MODULE, | |
1853 | .driver_name = "msm_serial_hs", | |
1854 | .dev_name = "ttyHS", | |
1855 | .nr = UARTDM_NR, | |
1856 | .cons = 0, | |
1857 | }; | |
1858 | ||
/*
 * serial_core operations table.  The "_locked" entries are callbacks
 * that the serial core invokes with uport->lock already held.
 */
static struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.pm = msm_hs_pm,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.release_port = msm_hs_release_port,
	.request_port = msm_hs_request_port,
};
1877 | ||
/* Module metadata. */
MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
MODULE_VERSION("1.2");
MODULE_LICENSE("GPL v2");