Commit | Line | Data |
---|---|---|
161e773c RW |
1 | /* |
2 | * Driver for CSR SiRFprimaII onboard UARTs. | |
3 | * | |
4 | * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. | |
5 | * | |
6 | * Licensed under GPLv2 or later. | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/ioport.h> | |
11 | #include <linux/platform_device.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/sysrq.h> | |
14 | #include <linux/console.h> | |
15 | #include <linux/tty.h> | |
16 | #include <linux/tty_flip.h> | |
17 | #include <linux/serial_core.h> | |
18 | #include <linux/serial.h> | |
19 | #include <linux/clk.h> | |
20 | #include <linux/of.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/io.h> | |
2eb5618d | 23 | #include <linux/of_gpio.h> |
8316d04c QL |
24 | #include <linux/dmaengine.h> |
25 | #include <linux/dma-direction.h> | |
26 | #include <linux/dma-mapping.h> | |
27 | #include <linux/sirfsoc_dma.h> | |
161e773c RW |
28 | #include <asm/irq.h> |
29 | #include <asm/mach/irq.h> | |
161e773c RW |
30 | |
31 | #include "sirfsoc_uart.h" | |
32 | ||
33 | static unsigned int | |
34 | sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count); | |
35 | static unsigned int | |
36 | sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count); | |
37 | static struct uart_driver sirfsoc_uart_drv; | |
38 | ||
8316d04c QL |
39 | static void sirfsoc_uart_tx_dma_complete_callback(void *param); |
40 | static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port); | |
41 | static void sirfsoc_uart_rx_dma_complete_callback(void *param); | |
161e773c RW |
/*
 * Lookup table mapping a requested baud rate to the pre-computed clock
 * divisor register value for that rate.  Entries are ordered from the
 * highest to the lowest supported rate.
 */
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
	{4000000, 2359296},
	{3500000, 1310721},
	{3000000, 1572865},
	{2500000, 1245186},
	{2000000, 1572866},
	{1500000, 1245188},
	{1152000, 1638404},
	{1000000, 1572869},
	{921600, 1114120},
	{576000, 1245196},
	{500000, 1245198},
	{460800, 1572876},
	{230400, 1310750},
	{115200, 1310781},
	{57600, 1310843},
	{38400, 1114328},
	{19200, 1114545},
	{9600, 1114979},
};
62 | ||
/*
 * Static table of the SIRFSOC_UART_NR port instances.  Each entry only
 * pre-fills the generic uart_port fields (memory-mapped I/O, autoconf,
 * line number); the rest is filled in at probe time.
 */
static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
	[0] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 0,
		},
	},
	[1] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 1,
		},
	},
	[2] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 2,
		},
	},
	[3] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 3,
		},
	},
	[4] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 4,
		},
	},
	[5] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 5,
		},
	},
};
107 | ||
/* Recover the driver-private wrapper from the embedded struct uart_port. */
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
	return container_of(port, struct sirfsoc_uart_port, port);
}
112 | ||
113 | static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port) | |
114 | { | |
115 | unsigned long reg; | |
5df83111 QL |
116 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
117 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
118 | struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; | |
119 | reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status); | |
120 | ||
121 | return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0; | |
161e773c RW |
122 | } |
123 | ||
124 | static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port) | |
125 | { | |
126 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 | 127 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
2eb5618d | 128 | if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) |
161e773c | 129 | goto cts_asserted; |
2eb5618d | 130 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { |
5df83111 QL |
131 | if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) & |
132 | SIRFUART_AFC_CTS_STATUS)) | |
161e773c RW |
133 | goto cts_asserted; |
134 | else | |
135 | goto cts_deasserted; | |
2eb5618d QL |
136 | } else { |
137 | if (!gpio_get_value(sirfport->cts_gpio)) | |
138 | goto cts_asserted; | |
139 | else | |
140 | goto cts_deasserted; | |
161e773c RW |
141 | } |
142 | cts_deasserted: | |
143 | return TIOCM_CAR | TIOCM_DSR; | |
144 | cts_asserted: | |
145 | return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; | |
146 | } | |
147 | ||
148 | static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) | |
149 | { | |
150 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 | 151 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
161e773c RW |
152 | unsigned int assert = mctrl & TIOCM_RTS; |
153 | unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0; | |
154 | unsigned int current_val; | |
2eb5618d QL |
155 | |
156 | if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled) | |
157 | return; | |
158 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
5df83111 | 159 | current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF; |
161e773c | 160 | val |= current_val; |
5df83111 | 161 | wr_regl(port, ureg->sirfsoc_afc_ctrl, val); |
2eb5618d QL |
162 | } else { |
163 | if (!val) | |
164 | gpio_set_value(sirfport->rts_gpio, 1); | |
165 | else | |
166 | gpio_set_value(sirfport->rts_gpio, 0); | |
161e773c RW |
167 | } |
168 | } | |
169 | ||
170 | static void sirfsoc_uart_stop_tx(struct uart_port *port) | |
171 | { | |
909102db | 172 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
5df83111 QL |
173 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
174 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
909102db | 175 | |
8316d04c QL |
176 | if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) { |
177 | if (sirfport->tx_dma_state == TX_DMA_RUNNING) { | |
178 | dmaengine_pause(sirfport->tx_dma_chan); | |
179 | sirfport->tx_dma_state = TX_DMA_PAUSE; | |
180 | } else { | |
181 | if (!sirfport->is_marco) | |
182 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
183 | rd_regl(port, ureg->sirfsoc_int_en_reg) & | |
184 | ~uint_en->sirfsoc_txfifo_empty_en); | |
185 | else | |
186 | wr_regl(port, SIRFUART_INT_EN_CLR, | |
187 | uint_en->sirfsoc_txfifo_empty_en); | |
188 | } | |
189 | } else { | |
190 | if (!sirfport->is_marco) | |
191 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
192 | rd_regl(port, ureg->sirfsoc_int_en_reg) & | |
193 | ~uint_en->sirfsoc_txfifo_empty_en); | |
194 | else | |
195 | wr_regl(port, SIRFUART_INT_EN_CLR, | |
196 | uint_en->sirfsoc_txfifo_empty_en); | |
197 | } | |
198 | } | |
199 | ||
/*
 * Kick (or resume) a TX transfer using the DMA engine.  Handles the
 * unaligned head/tail of the circular buffer with PIO and hands the
 * 4-byte-aligned middle portion to DMA.  No-op while a DMA transfer is
 * already running.
 */
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	/* Only the contiguous run up to the buffer end can be DMA-mapped. */
	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
		!tran_size)
		return;
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	/* Mask the PIO-driven TX FIFO empty interrupt while DMA takes over. */
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg)&
			~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
			uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		/* Switch the TX path into I/O (PIO) mode for the odd bytes. */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		/* Re-enable the empty interrupt so PIO transmit continues. */
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		/* Round the transfer down to a whole number of words. */
		tran_size &= ~(0x3);

		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		/* Remembered so the completion callback can advance the tail. */
		sirfport->transfer_size = tran_size;

		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
285 | ||
ada1f443 | 286 | static void sirfsoc_uart_start_tx(struct uart_port *port) |
161e773c RW |
287 | { |
288 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 QL |
289 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
290 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
8316d04c QL |
291 | if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) |
292 | sirfsoc_uart_tx_with_dma(sirfport); | |
293 | else { | |
294 | sirfsoc_uart_pio_tx_chars(sirfport, 1); | |
295 | wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START); | |
296 | if (!sirfport->is_marco) | |
297 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
298 | rd_regl(port, ureg->sirfsoc_int_en_reg)| | |
299 | uint_en->sirfsoc_txfifo_empty_en); | |
300 | else | |
301 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
302 | uint_en->sirfsoc_txfifo_empty_en); | |
303 | } | |
161e773c RW |
304 | } |
305 | ||
306 | static void sirfsoc_uart_stop_rx(struct uart_port *port) | |
307 | { | |
909102db | 308 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
5df83111 QL |
309 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
310 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
8316d04c | 311 | |
5df83111 | 312 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); |
8316d04c QL |
313 | if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) { |
314 | if (!sirfport->is_marco) | |
315 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
316 | rd_regl(port, ureg->sirfsoc_int_en_reg) & | |
317 | ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) | | |
318 | uint_en->sirfsoc_rx_done_en)); | |
319 | else | |
320 | wr_regl(port, SIRFUART_INT_EN_CLR, | |
321 | SIRFUART_RX_DMA_INT_EN(port, uint_en)| | |
322 | uint_en->sirfsoc_rx_done_en); | |
323 | dmaengine_terminate_all(sirfport->rx_dma_chan); | |
324 | } else { | |
325 | if (!sirfport->is_marco) | |
326 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
327 | rd_regl(port, ureg->sirfsoc_int_en_reg)& | |
328 | ~(SIRFUART_RX_IO_INT_EN(port, uint_en))); | |
329 | else | |
330 | wr_regl(port, SIRFUART_INT_EN_CLR, | |
331 | SIRFUART_RX_IO_INT_EN(port, uint_en)); | |
332 | } | |
161e773c RW |
333 | } |
334 | ||
335 | static void sirfsoc_uart_disable_ms(struct uart_port *port) | |
336 | { | |
337 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 QL |
338 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
339 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
909102db | 340 | |
161e773c RW |
341 | if (!sirfport->hw_flow_ctrl) |
342 | return; | |
2eb5618d QL |
343 | sirfport->ms_enabled = false; |
344 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
345 | wr_regl(port, ureg->sirfsoc_afc_ctrl, | |
346 | rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF); | |
347 | if (!sirfport->is_marco) | |
348 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
349 | rd_regl(port, ureg->sirfsoc_int_en_reg)& | |
350 | ~uint_en->sirfsoc_cts_en); | |
351 | else | |
352 | wr_regl(port, SIRFUART_INT_EN_CLR, | |
353 | uint_en->sirfsoc_cts_en); | |
5df83111 | 354 | } else |
2eb5618d QL |
355 | disable_irq(gpio_to_irq(sirfport->cts_gpio)); |
356 | } | |
357 | ||
358 | static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id) | |
359 | { | |
360 | struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id; | |
361 | struct uart_port *port = &sirfport->port; | |
362 | if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled) | |
363 | uart_handle_cts_change(port, | |
364 | !gpio_get_value(sirfport->cts_gpio)); | |
365 | return IRQ_HANDLED; | |
161e773c RW |
366 | } |
367 | ||
368 | static void sirfsoc_uart_enable_ms(struct uart_port *port) | |
369 | { | |
370 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 QL |
371 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
372 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
909102db | 373 | |
161e773c RW |
374 | if (!sirfport->hw_flow_ctrl) |
375 | return; | |
2eb5618d QL |
376 | sirfport->ms_enabled = true; |
377 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
378 | wr_regl(port, ureg->sirfsoc_afc_ctrl, | |
379 | rd_regl(port, ureg->sirfsoc_afc_ctrl) | | |
380 | SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN); | |
381 | if (!sirfport->is_marco) | |
382 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
383 | rd_regl(port, ureg->sirfsoc_int_en_reg) | |
384 | | uint_en->sirfsoc_cts_en); | |
385 | else | |
386 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
387 | uint_en->sirfsoc_cts_en); | |
5df83111 | 388 | } else |
2eb5618d | 389 | enable_irq(gpio_to_irq(sirfport->cts_gpio)); |
161e773c RW |
390 | } |
391 | ||
392 | static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state) | |
393 | { | |
5df83111 QL |
394 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
395 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
396 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
397 | unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl); | |
398 | if (break_state) | |
399 | ulcon |= SIRFUART_SET_BREAK; | |
400 | else | |
401 | ulcon &= ~SIRFUART_SET_BREAK; | |
402 | wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon); | |
403 | } | |
161e773c RW |
404 | } |
405 | ||
406 | static unsigned int | |
407 | sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count) | |
408 | { | |
5df83111 QL |
409 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
410 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
411 | struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; | |
161e773c | 412 | unsigned int ch, rx_count = 0; |
5df83111 QL |
413 | struct tty_struct *tty; |
414 | tty = tty_port_tty_get(&port->state->port); | |
415 | if (!tty) | |
416 | return -ENODEV; | |
417 | while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) & | |
418 | ufifo_st->ff_empty(port->line))) { | |
419 | ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) | | |
420 | SIRFUART_DUMMY_READ; | |
161e773c RW |
421 | if (unlikely(uart_handle_sysrq_char(port, ch))) |
422 | continue; | |
423 | uart_insert_char(port, 0, 0, ch, TTY_NORMAL); | |
424 | rx_count++; | |
425 | if (rx_count >= max_rx_count) | |
426 | break; | |
427 | } | |
428 | ||
8316d04c | 429 | sirfport->rx_io_count += rx_count; |
161e773c | 430 | port->icount.rx += rx_count; |
8b9ade9f VK |
431 | |
432 | spin_unlock(&port->lock); | |
2e124b4a | 433 | tty_flip_buffer_push(&port->state->port); |
8b9ade9f | 434 | spin_lock(&port->lock); |
161e773c RW |
435 | |
436 | return rx_count; | |
437 | } | |
438 | ||
439 | static unsigned int | |
440 | sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count) | |
441 | { | |
442 | struct uart_port *port = &sirfport->port; | |
5df83111 QL |
443 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
444 | struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; | |
161e773c RW |
445 | struct circ_buf *xmit = &port->state->xmit; |
446 | unsigned int num_tx = 0; | |
447 | while (!uart_circ_empty(xmit) && | |
5df83111 QL |
448 | !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) & |
449 | ufifo_st->ff_full(port->line)) && | |
161e773c | 450 | count--) { |
5df83111 QL |
451 | wr_regl(port, ureg->sirfsoc_tx_fifo_data, |
452 | xmit->buf[xmit->tail]); | |
161e773c RW |
453 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
454 | port->icount.tx++; | |
455 | num_tx++; | |
456 | } | |
457 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | |
458 | uart_write_wakeup(port); | |
459 | return num_tx; | |
460 | } | |
461 | ||
8316d04c QL |
/*
 * DMA completion callback for TX: retire the transferred bytes from the
 * circular buffer, unmap the DMA buffer, and immediately try to queue
 * the next chunk.
 *
 * NOTE(review): xmit->tail and icount are updated before tx_lock is
 * taken — presumably safe because only one TX DMA transfer is in flight
 * at a time, but worth confirming against the rest of the driver.
 */
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	/* transfer_size was recorded when the descriptor was submitted. */
	xmit->tail = (xmit->tail + sirfport->transfer_size) &
			(UART_XMIT_SIZE - 1);
	port->icount.tx += sirfport->transfer_size;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (sirfport->tx_dma_addr)
		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
				sirfport->transfer_size, DMA_TO_DEVICE);
	spin_lock_irqsave(&sirfport->tx_lock, flags);
	sirfport->tx_dma_state = TX_DMA_IDLE;
	/* Chain straight into the next transfer if more data is pending. */
	sirfsoc_uart_tx_with_dma(sirfport);
	spin_unlock_irqrestore(&sirfport->tx_lock, flags);
}
482 | ||
483 | static void sirfsoc_uart_insert_rx_buf_to_tty( | |
484 | struct sirfsoc_uart_port *sirfport, int count) | |
485 | { | |
486 | struct uart_port *port = &sirfport->port; | |
487 | struct tty_port *tport = &port->state->port; | |
488 | int inserted; | |
489 | ||
490 | inserted = tty_insert_flip_string(tport, | |
491 | sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count); | |
492 | port->icount.rx += inserted; | |
493 | tty_flip_buffer_push(tport); | |
494 | } | |
495 | ||
/*
 * (Re)arm one slot of the RX DMA ring: reset its circ-buffer indices,
 * prepare a DEV_TO_MEM descriptor for the slot's buffer, hook up the
 * completion callback, and submit it to the dmaengine.
 */
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	/* Cookie kept so rx-timeout handling can query transfer progress. */
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}
517 | ||
/*
 * Tasklet run after an RX timeout interrupt: flush all completed DMA
 * buffers plus the partially-filled one to the tty, switch the RX path
 * to PIO to pick up any stragglers, and either restart DMA (once 4 PIO
 * chars have accumulated) or re-enable the rx-done interrupt to wait
 * for more.
 */
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	unsigned int count;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	/* Drain every fully completed ring buffer and re-arm its slot. */
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
				SIRFSOC_RX_DMA_BUF_SIZE);
		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	/* Flush whatever the aborted in-flight transfer already received. */
	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
			sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
			SIRFSOC_RX_DMA_BUF_SIZE);
	if (count > 0)
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
	/* Fall back to PIO (I/O mode) to collect the remaining few bytes. */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	if (sirfport->rx_io_count == 4) {
		/* Word-aligned again: mask rx-done and go back to DMA. */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		sirfport->rx_io_count = 0;
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) &
					~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);

		sirfsoc_uart_start_next_rx_dma(port);
	} else {
		/* Not aligned yet: ack and re-enable rx-done to keep PIO going. */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) |
					(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	}
}
574 | ||
/*
 * RX timeout interrupt handling (called from the ISR): record how far
 * the in-flight DMA transfer got, abort all RX DMA, mask further
 * timeout interrupts, and defer the buffer flushing to the
 * rx_tmo_process tasklet.
 */
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct dma_tx_state tx_state;
	spin_lock(&sirfport->rx_lock);

	/* residue tells how many bytes of the current buffer are still unused. */
	dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
		SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) &
			~(uint_en->sirfsoc_rx_timeout_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
			uint_en->sirfsoc_rx_timeout_en);
	spin_unlock(&sirfport->rx_lock);
	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}
598 | ||
/*
 * RX "done" interrupt handling while in PIO fallback mode: read the
 * remaining bytes by PIO; once 4 bytes (one word) have been collected
 * the stream is word-aligned again, so mask rx-done, ack the stale
 * timeout status, and switch back to DMA reception.
 */
static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	if (sirfport->rx_io_count == 4) {
		sirfport->rx_io_count = 0;
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_rx_done_en);
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_timeout);
		sirfsoc_uart_start_next_rx_dma(port);
	}
}
621 | ||
161e773c RW |
/*
 * Main UART interrupt handler: acknowledges all pending status bits,
 * then dispatches in order to error handling, CTS change, RX (DMA or
 * PIO depending on configuration), and TX-FIFO-empty servicing.
 */
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;
	spin_lock(&port->lock);
	/* Read-and-ack all pending interrupts, then keep only enabled ones. */
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow)
			port->icount.overrun++;
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err)
			flag = TTY_PARITY;
		/* Flush the RX FIFO to recover from the error condition. */
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
				uint_en->sirfsoc_rx_oflow_en, 0, flag);
		tty_flip_buffer_push(&state->port);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		/* Invert the raw AFC status: the line is active-low. */
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
		if (intr_status & uint_st->sirfsoc_rx_timeout)
			sirfsoc_uart_handle_rx_tmo(sirfport);
		if (intr_status & uint_st->sirfsoc_rx_done)
			sirfsoc_uart_handle_rx_done(sirfport);
	} else {
		if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
			sirfsoc_uart_pio_rx_chars(port,
					SIRFSOC_UART_IO_RX_MAX_CNT);
	}
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
					SIRFSOC_UART_IO_TX_REASONABLE_CNT);
				/* Stop TX only once both buffer and FIFO drain. */
				if ((uart_circ_empty(xmit)) &&
					(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
					ufifo_st->ff_empty(port->line)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}
704 | ||
8316d04c QL |
/*
 * Tasklet scheduled by the RX DMA completion callback: push every
 * fully-completed ring buffer to the tty and immediately re-arm each
 * slot so the ring keeps running.
 */
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	unsigned long flags;
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	/* rx_completed chases rx_issued around the ring. */
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
				SIRFSOC_RX_DMA_BUF_SIZE);
		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
719 | ||
720 | static void sirfsoc_uart_rx_dma_complete_callback(void *param) | |
721 | { | |
722 | struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param; | |
723 | spin_lock(&sirfport->rx_lock); | |
724 | sirfport->rx_issued++; | |
725 | sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT; | |
726 | spin_unlock(&sirfport->rx_lock); | |
727 | tasklet_schedule(&sirfport->rx_dma_complete_tasklet); | |
728 | } | |
729 | ||
/* submit rx dma task into dmaengine */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;
	int i;
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	sirfport->rx_io_count = 0;
	/* Switch the RX path from I/O (PIO) mode back to DMA mode. */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	/* Arm every slot of the RX ring and reset the ring indices. */
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	sirfport->rx_completed = sirfport->rx_issued = 0;
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
757 | ||
758 | static void sirfsoc_uart_start_rx(struct uart_port *port) | |
759 | { | |
760 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
761 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
762 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
763 | ||
764 | sirfport->rx_io_count = 0; | |
5df83111 QL |
765 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); |
766 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); | |
767 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START); | |
8316d04c QL |
768 | if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) |
769 | sirfsoc_uart_start_next_rx_dma(port); | |
770 | else { | |
771 | if (!sirfport->is_marco) | |
772 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
773 | rd_regl(port, ureg->sirfsoc_int_en_reg) | | |
774 | SIRFUART_RX_IO_INT_EN(port, uint_en)); | |
775 | else | |
776 | wr_regl(port, ureg->sirfsoc_int_en_reg, | |
777 | SIRFUART_RX_IO_INT_EN(port, uint_en)); | |
778 | } | |
5df83111 QL |
779 | } |
780 | ||
781 | static unsigned int | |
782 | sirfsoc_usp_calc_sample_div(unsigned long set_rate, | |
783 | unsigned long ioclk_rate, unsigned long *sample_reg) | |
784 | { | |
785 | unsigned long min_delta = ~0UL; | |
786 | unsigned short sample_div; | |
787 | unsigned long ioclk_div = 0; | |
788 | unsigned long temp_delta; | |
789 | ||
790 | for (sample_div = SIRF_MIN_SAMPLE_DIV; | |
791 | sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) { | |
792 | temp_delta = ioclk_rate - | |
793 | (ioclk_rate + (set_rate * sample_div) / 2) | |
794 | / (set_rate * sample_div) * set_rate * sample_div; | |
909102db | 795 | |
5df83111 QL |
796 | temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta; |
797 | if (temp_delta < min_delta) { | |
798 | ioclk_div = (2 * ioclk_rate / | |
799 | (set_rate * sample_div) + 1) / 2 - 1; | |
800 | if (ioclk_div > SIRF_IOCLK_DIV_MAX) | |
801 | continue; | |
802 | min_delta = temp_delta; | |
803 | *sample_reg = sample_div; | |
804 | if (!temp_delta) | |
805 | break; | |
806 | } | |
807 | } | |
808 | return ioclk_div; | |
161e773c RW |
809 | } |
810 | ||
811 | static unsigned int | |
5df83111 QL |
812 | sirfsoc_uart_calc_sample_div(unsigned long baud_rate, |
813 | unsigned long ioclk_rate, unsigned long *set_baud) | |
161e773c RW |
814 | { |
815 | unsigned long min_delta = ~0UL; | |
816 | unsigned short sample_div; | |
817 | unsigned int regv = 0; | |
818 | unsigned long ioclk_div; | |
819 | unsigned long baud_tmp; | |
820 | int temp_delta; | |
821 | ||
822 | for (sample_div = SIRF_MIN_SAMPLE_DIV; | |
823 | sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) { | |
824 | ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1; | |
825 | if (ioclk_div > SIRF_IOCLK_DIV_MAX) | |
826 | continue; | |
827 | baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1)); | |
828 | temp_delta = baud_tmp - baud_rate; | |
829 | temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta; | |
830 | if (temp_delta < min_delta) { | |
831 | regv = regv & (~SIRF_IOCLK_DIV_MASK); | |
832 | regv = regv | ioclk_div; | |
833 | regv = regv & (~SIRF_SAMPLE_DIV_MASK); | |
834 | regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT); | |
835 | min_delta = temp_delta; | |
5df83111 | 836 | *set_baud = baud_tmp; |
161e773c RW |
837 | } |
838 | } | |
839 | return regv; | |
840 | } | |
841 | ||
842 | static void sirfsoc_uart_set_termios(struct uart_port *port, | |
843 | struct ktermios *termios, | |
844 | struct ktermios *old) | |
845 | { | |
846 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 QL |
847 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
848 | struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en; | |
161e773c RW |
849 | unsigned long config_reg = 0; |
850 | unsigned long baud_rate; | |
5df83111 | 851 | unsigned long set_baud; |
161e773c RW |
852 | unsigned long flags; |
853 | unsigned long ic; | |
854 | unsigned int clk_div_reg = 0; | |
8316d04c | 855 | unsigned long txfifo_op_reg, ioclk_rate; |
161e773c RW |
856 | unsigned long rx_time_out; |
857 | int threshold_div; | |
5df83111 QL |
858 | u32 data_bit_len, stop_bit_len, len_val; |
859 | unsigned long sample_div_reg = 0xf; | |
860 | ioclk_rate = port->uartclk; | |
161e773c | 861 | |
161e773c RW |
862 | switch (termios->c_cflag & CSIZE) { |
863 | default: | |
864 | case CS8: | |
5df83111 | 865 | data_bit_len = 8; |
161e773c RW |
866 | config_reg |= SIRFUART_DATA_BIT_LEN_8; |
867 | break; | |
868 | case CS7: | |
5df83111 | 869 | data_bit_len = 7; |
161e773c RW |
870 | config_reg |= SIRFUART_DATA_BIT_LEN_7; |
871 | break; | |
872 | case CS6: | |
5df83111 | 873 | data_bit_len = 6; |
161e773c RW |
874 | config_reg |= SIRFUART_DATA_BIT_LEN_6; |
875 | break; | |
876 | case CS5: | |
5df83111 | 877 | data_bit_len = 5; |
161e773c RW |
878 | config_reg |= SIRFUART_DATA_BIT_LEN_5; |
879 | break; | |
880 | } | |
5df83111 | 881 | if (termios->c_cflag & CSTOPB) { |
161e773c | 882 | config_reg |= SIRFUART_STOP_BIT_LEN_2; |
5df83111 QL |
883 | stop_bit_len = 2; |
884 | } else | |
885 | stop_bit_len = 1; | |
886 | ||
161e773c | 887 | spin_lock_irqsave(&port->lock, flags); |
5df83111 | 888 | port->read_status_mask = uint_en->sirfsoc_rx_oflow_en; |
161e773c | 889 | port->ignore_status_mask = 0; |
5df83111 QL |
890 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { |
891 | if (termios->c_iflag & INPCK) | |
892 | port->read_status_mask |= uint_en->sirfsoc_frm_err_en | | |
893 | uint_en->sirfsoc_parity_err_en; | |
2eb5618d | 894 | } else { |
5df83111 QL |
895 | if (termios->c_iflag & INPCK) |
896 | port->read_status_mask |= uint_en->sirfsoc_frm_err_en; | |
897 | } | |
161e773c | 898 | if (termios->c_iflag & (BRKINT | PARMRK)) |
5df83111 QL |
899 | port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en; |
900 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
901 | if (termios->c_iflag & IGNPAR) | |
902 | port->ignore_status_mask |= | |
903 | uint_en->sirfsoc_frm_err_en | | |
904 | uint_en->sirfsoc_parity_err_en; | |
905 | if (termios->c_cflag & PARENB) { | |
906 | if (termios->c_cflag & CMSPAR) { | |
907 | if (termios->c_cflag & PARODD) | |
908 | config_reg |= SIRFUART_STICK_BIT_MARK; | |
909 | else | |
910 | config_reg |= SIRFUART_STICK_BIT_SPACE; | |
911 | } else if (termios->c_cflag & PARODD) { | |
912 | config_reg |= SIRFUART_STICK_BIT_ODD; | |
913 | } else { | |
914 | config_reg |= SIRFUART_STICK_BIT_EVEN; | |
915 | } | |
916 | } | |
2eb5618d | 917 | } else { |
5df83111 QL |
918 | if (termios->c_iflag & IGNPAR) |
919 | port->ignore_status_mask |= | |
920 | uint_en->sirfsoc_frm_err_en; | |
921 | if (termios->c_cflag & PARENB) | |
922 | dev_warn(port->dev, | |
923 | "USP-UART not support parity err\n"); | |
924 | } | |
925 | if (termios->c_iflag & IGNBRK) { | |
161e773c | 926 | port->ignore_status_mask |= |
5df83111 QL |
927 | uint_en->sirfsoc_rxd_brk_en; |
928 | if (termios->c_iflag & IGNPAR) | |
929 | port->ignore_status_mask |= | |
930 | uint_en->sirfsoc_rx_oflow_en; | |
931 | } | |
161e773c RW |
932 | if ((termios->c_cflag & CREAD) == 0) |
933 | port->ignore_status_mask |= SIRFUART_DUMMY_READ; | |
161e773c RW |
934 | /* Hardware Flow Control Settings */ |
935 | if (UART_ENABLE_MS(port, termios->c_cflag)) { | |
936 | if (!sirfport->ms_enabled) | |
937 | sirfsoc_uart_enable_ms(port); | |
938 | } else { | |
939 | if (sirfport->ms_enabled) | |
940 | sirfsoc_uart_disable_ms(port); | |
941 | } | |
5df83111 QL |
942 | baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000); |
943 | if (ioclk_rate == 150000000) { | |
ac4ce718 BS |
944 | for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++) |
945 | if (baud_rate == baudrate_to_regv[ic].baud_rate) | |
946 | clk_div_reg = baudrate_to_regv[ic].reg_val; | |
947 | } | |
5df83111 QL |
948 | set_baud = baud_rate; |
949 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { | |
950 | if (unlikely(clk_div_reg == 0)) | |
951 | clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate, | |
952 | ioclk_rate, &set_baud); | |
953 | wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg); | |
2eb5618d | 954 | } else { |
5df83111 QL |
955 | clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate, |
956 | ioclk_rate, &sample_div_reg); | |
957 | sample_div_reg--; | |
958 | set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) / | |
959 | (sample_div_reg + 1)); | |
960 | /* setting usp mode 2 */ | |
459f15c4 QL |
961 | len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) | |
962 | (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET)); | |
963 | len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK) | |
964 | << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET); | |
965 | wr_regl(port, ureg->sirfsoc_mode2, len_val); | |
5df83111 | 966 | } |
161e773c | 967 | if (tty_termios_baud_rate(termios)) |
5df83111 QL |
968 | tty_termios_encode_baud_rate(termios, set_baud, set_baud); |
969 | /* set receive timeout && data bits len */ | |
970 | rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000); | |
971 | rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out); | |
8316d04c | 972 | txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op); |
459f15c4 | 973 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP); |
5df83111 | 974 | wr_regl(port, ureg->sirfsoc_tx_fifo_op, |
8316d04c | 975 | (txfifo_op_reg & ~SIRFUART_FIFO_START)); |
5df83111 QL |
976 | if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) { |
977 | config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out); | |
978 | wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg); | |
2eb5618d | 979 | } else { |
5df83111 | 980 | /*tx frame ctrl*/ |
459f15c4 QL |
981 | len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET; |
982 | len_val |= (data_bit_len + 1 + stop_bit_len - 1) << | |
983 | SIRFSOC_USP_TX_FRAME_LEN_OFFSET; | |
984 | len_val |= ((data_bit_len - 1) << | |
985 | SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET); | |
986 | len_val |= (((clk_div_reg & 0xc00) >> 10) << | |
987 | SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET); | |
5df83111 QL |
988 | wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val); |
989 | /*rx frame ctrl*/ | |
459f15c4 QL |
990 | len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET; |
991 | len_val |= (data_bit_len + 1 + stop_bit_len - 1) << | |
992 | SIRFSOC_USP_RX_FRAME_LEN_OFFSET; | |
993 | len_val |= (data_bit_len - 1) << | |
994 | SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET; | |
995 | len_val |= (((clk_div_reg & 0xf000) >> 12) << | |
996 | SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET); | |
5df83111 QL |
997 | wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val); |
998 | /*async param*/ | |
999 | wr_regl(port, ureg->sirfsoc_async_param_reg, | |
1000 | (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) | | |
459f15c4 QL |
1001 | (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) << |
1002 | SIRFSOC_USP_ASYNC_DIV2_OFFSET); | |
5df83111 | 1003 | } |
8316d04c QL |
1004 | if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) |
1005 | wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE); | |
1006 | else | |
1007 | wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE); | |
1008 | if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) | |
1009 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE); | |
1010 | else | |
1011 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE); | |
161e773c | 1012 | /* Reset Rx/Tx FIFO Threshold level for proper baudrate */ |
5df83111 | 1013 | if (set_baud < 1000000) |
161e773c RW |
1014 | threshold_div = 1; |
1015 | else | |
1016 | threshold_div = 2; | |
8316d04c QL |
1017 | wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, |
1018 | SIRFUART_FIFO_THD(port) / threshold_div); | |
1019 | wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, | |
1020 | SIRFUART_FIFO_THD(port) / threshold_div); | |
1021 | txfifo_op_reg |= SIRFUART_FIFO_START; | |
1022 | wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg); | |
5df83111 | 1023 | uart_update_timeout(port, termios->c_cflag, set_baud); |
161e773c | 1024 | sirfsoc_uart_start_rx(port); |
5df83111 | 1025 | wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN); |
161e773c RW |
1026 | spin_unlock_irqrestore(&port->lock, flags); |
1027 | } | |
1028 | ||
8316d04c QL |
1029 | static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port) |
1030 | { | |
1031 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
1032 | dma_cap_mask_t dma_mask; | |
1033 | struct dma_slave_config tx_slv_cfg = { | |
1034 | .dst_maxburst = 2, | |
1035 | }; | |
1036 | ||
1037 | dma_cap_zero(dma_mask); | |
1038 | dma_cap_set(DMA_SLAVE, dma_mask); | |
1039 | sirfport->tx_dma_chan = dma_request_channel(dma_mask, | |
1040 | (dma_filter_fn)sirfsoc_dma_filter_id, | |
1041 | (void *)sirfport->tx_dma_no); | |
1042 | if (!sirfport->tx_dma_chan) { | |
1043 | dev_err(port->dev, "Uart Request Dma Channel Fail %d\n", | |
1044 | sirfport->tx_dma_no); | |
1045 | return -EPROBE_DEFER; | |
1046 | } | |
1047 | dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg); | |
1048 | ||
1049 | return 0; | |
1050 | } | |
1051 | ||
1052 | static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port) | |
1053 | { | |
1054 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
1055 | dma_cap_mask_t dma_mask; | |
1056 | int ret; | |
1057 | int i, j; | |
1058 | struct dma_slave_config slv_cfg = { | |
1059 | .src_maxburst = 2, | |
1060 | }; | |
1061 | ||
1062 | dma_cap_zero(dma_mask); | |
1063 | dma_cap_set(DMA_SLAVE, dma_mask); | |
1064 | sirfport->rx_dma_chan = dma_request_channel(dma_mask, | |
1065 | (dma_filter_fn)sirfsoc_dma_filter_id, | |
1066 | (void *)sirfport->rx_dma_no); | |
1067 | if (!sirfport->rx_dma_chan) { | |
1068 | dev_err(port->dev, "Uart Request Dma Channel Fail %d\n", | |
1069 | sirfport->rx_dma_no); | |
1070 | ret = -EPROBE_DEFER; | |
1071 | goto request_err; | |
1072 | } | |
1073 | for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) { | |
1074 | sirfport->rx_dma_items[i].xmit.buf = | |
1075 | dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, | |
1076 | &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL); | |
1077 | if (!sirfport->rx_dma_items[i].xmit.buf) { | |
1078 | dev_err(port->dev, "Uart alloc bufa failed\n"); | |
1079 | ret = -ENOMEM; | |
1080 | goto alloc_coherent_err; | |
1081 | } | |
1082 | sirfport->rx_dma_items[i].xmit.head = | |
1083 | sirfport->rx_dma_items[i].xmit.tail = 0; | |
1084 | } | |
1085 | dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg); | |
1086 | ||
1087 | return 0; | |
1088 | alloc_coherent_err: | |
1089 | for (j = 0; j < i; j++) | |
1090 | dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, | |
1091 | sirfport->rx_dma_items[j].xmit.buf, | |
1092 | sirfport->rx_dma_items[j].dma_addr); | |
1093 | dma_release_channel(sirfport->rx_dma_chan); | |
1094 | request_err: | |
1095 | return ret; | |
1096 | } | |
1097 | ||
1098 | static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport) | |
1099 | { | |
1100 | dmaengine_terminate_all(sirfport->tx_dma_chan); | |
1101 | dma_release_channel(sirfport->tx_dma_chan); | |
1102 | } | |
1103 | ||
1104 | static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport) | |
1105 | { | |
1106 | int i; | |
1107 | struct uart_port *port = &sirfport->port; | |
1108 | dmaengine_terminate_all(sirfport->rx_dma_chan); | |
1109 | dma_release_channel(sirfport->rx_dma_chan); | |
1110 | for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) | |
1111 | dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE, | |
1112 | sirfport->rx_dma_items[i].xmit.buf, | |
1113 | sirfport->rx_dma_items[i].dma_addr); | |
1114 | } | |
1115 | ||
161e773c RW |
1116 | static int sirfsoc_uart_startup(struct uart_port *port) |
1117 | { | |
1118 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
15cdcb12 | 1119 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
161e773c RW |
1120 | unsigned int index = port->line; |
1121 | int ret; | |
1122 | set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN); | |
1123 | ret = request_irq(port->irq, | |
1124 | sirfsoc_uart_isr, | |
1125 | 0, | |
1126 | SIRFUART_PORT_NAME, | |
1127 | sirfport); | |
1128 | if (ret != 0) { | |
1129 | dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n", | |
1130 | index, port->irq); | |
1131 | goto irq_err; | |
1132 | } | |
15cdcb12 QL |
1133 | |
1134 | /* initial hardware settings */ | |
1135 | wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, | |
1136 | rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) | | |
1137 | SIRFUART_IO_MODE); | |
1138 | wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, | |
1139 | rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) | | |
1140 | SIRFUART_IO_MODE); | |
1141 | wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0); | |
1142 | wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0); | |
1143 | wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN); | |
1144 | if (sirfport->uart_reg->uart_type == SIRF_USP_UART) | |
1145 | wr_regl(port, ureg->sirfsoc_mode1, | |
1146 | SIRFSOC_USP_ENDIAN_CTRL_LSBF | | |
1147 | SIRFSOC_USP_EN); | |
1148 | wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET); | |
1149 | wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0); | |
1150 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET); | |
1151 | wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0); | |
1152 | wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port)); | |
1153 | wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port)); | |
2eb5618d | 1154 | |
8316d04c QL |
1155 | if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) { |
1156 | ret = sirfsoc_uart_init_rx_dma(port); | |
1157 | if (ret) | |
1158 | goto init_rx_err; | |
1159 | wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk, | |
1160 | SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) | | |
1161 | SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) | | |
1162 | SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b)); | |
1163 | } | |
1164 | if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) { | |
1165 | sirfsoc_uart_init_tx_dma(port); | |
1166 | sirfport->tx_dma_state = TX_DMA_IDLE; | |
1167 | wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk, | |
1168 | SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) | | |
1169 | SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) | | |
1170 | SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4)); | |
1171 | } | |
2eb5618d QL |
1172 | sirfport->ms_enabled = false; |
1173 | if (sirfport->uart_reg->uart_type == SIRF_USP_UART && | |
1174 | sirfport->hw_flow_ctrl) { | |
1175 | set_irq_flags(gpio_to_irq(sirfport->cts_gpio), | |
1176 | IRQF_VALID | IRQF_NOAUTOEN); | |
1177 | ret = request_irq(gpio_to_irq(sirfport->cts_gpio), | |
1178 | sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING | | |
1179 | IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport); | |
1180 | if (ret != 0) { | |
1181 | dev_err(port->dev, "UART-USP:request gpio irq fail\n"); | |
1182 | goto init_rx_err; | |
1183 | } | |
1184 | } | |
1185 | ||
161e773c | 1186 | enable_irq(port->irq); |
2eb5618d | 1187 | |
15cdcb12 | 1188 | return 0; |
2eb5618d QL |
1189 | init_rx_err: |
1190 | free_irq(port->irq, sirfport); | |
161e773c RW |
1191 | irq_err: |
1192 | return ret; | |
1193 | } | |
1194 | ||
1195 | static void sirfsoc_uart_shutdown(struct uart_port *port) | |
1196 | { | |
1197 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); | |
5df83111 | 1198 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; |
909102db | 1199 | if (!sirfport->is_marco) |
5df83111 | 1200 | wr_regl(port, ureg->sirfsoc_int_en_reg, 0); |
909102db BS |
1201 | else |
1202 | wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL); | |
1203 | ||
161e773c | 1204 | free_irq(port->irq, sirfport); |
2eb5618d | 1205 | if (sirfport->ms_enabled) |
161e773c | 1206 | sirfsoc_uart_disable_ms(port); |
2eb5618d QL |
1207 | if (sirfport->uart_reg->uart_type == SIRF_USP_UART && |
1208 | sirfport->hw_flow_ctrl) { | |
1209 | gpio_set_value(sirfport->rts_gpio, 1); | |
1210 | free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport); | |
161e773c | 1211 | } |
8316d04c QL |
1212 | if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) |
1213 | sirfsoc_uart_uninit_rx_dma(sirfport); | |
1214 | if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) { | |
1215 | sirfsoc_uart_uninit_tx_dma(sirfport); | |
1216 | sirfport->tx_dma_state = TX_DMA_IDLE; | |
1217 | } | |
161e773c RW |
1218 | } |
1219 | ||
1220 | static const char *sirfsoc_uart_type(struct uart_port *port) | |
1221 | { | |
1222 | return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL; | |
1223 | } | |
1224 | ||
1225 | static int sirfsoc_uart_request_port(struct uart_port *port) | |
1226 | { | |
5df83111 QL |
1227 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
1228 | struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param; | |
161e773c RW |
1229 | void *ret; |
1230 | ret = request_mem_region(port->mapbase, | |
5df83111 | 1231 | SIRFUART_MAP_SIZE, uart_param->port_name); |
161e773c RW |
1232 | return ret ? 0 : -EBUSY; |
1233 | } | |
1234 | ||
1235 | static void sirfsoc_uart_release_port(struct uart_port *port) | |
1236 | { | |
1237 | release_mem_region(port->mapbase, SIRFUART_MAP_SIZE); | |
1238 | } | |
1239 | ||
1240 | static void sirfsoc_uart_config_port(struct uart_port *port, int flags) | |
1241 | { | |
1242 | if (flags & UART_CONFIG_TYPE) { | |
1243 | port->type = SIRFSOC_PORT_TYPE; | |
1244 | sirfsoc_uart_request_port(port); | |
1245 | } | |
1246 | } | |
1247 | ||
1248 | static struct uart_ops sirfsoc_uart_ops = { | |
1249 | .tx_empty = sirfsoc_uart_tx_empty, | |
1250 | .get_mctrl = sirfsoc_uart_get_mctrl, | |
1251 | .set_mctrl = sirfsoc_uart_set_mctrl, | |
1252 | .stop_tx = sirfsoc_uart_stop_tx, | |
1253 | .start_tx = sirfsoc_uart_start_tx, | |
1254 | .stop_rx = sirfsoc_uart_stop_rx, | |
1255 | .enable_ms = sirfsoc_uart_enable_ms, | |
1256 | .break_ctl = sirfsoc_uart_break_ctl, | |
1257 | .startup = sirfsoc_uart_startup, | |
1258 | .shutdown = sirfsoc_uart_shutdown, | |
1259 | .set_termios = sirfsoc_uart_set_termios, | |
1260 | .type = sirfsoc_uart_type, | |
1261 | .release_port = sirfsoc_uart_release_port, | |
1262 | .request_port = sirfsoc_uart_request_port, | |
1263 | .config_port = sirfsoc_uart_config_port, | |
1264 | }; | |
1265 | ||
1266 | #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE | |
5df83111 QL |
1267 | static int __init |
1268 | sirfsoc_uart_console_setup(struct console *co, char *options) | |
161e773c RW |
1269 | { |
1270 | unsigned int baud = 115200; | |
1271 | unsigned int bits = 8; | |
1272 | unsigned int parity = 'n'; | |
1273 | unsigned int flow = 'n'; | |
1274 | struct uart_port *port = &sirfsoc_uart_ports[co->index].port; | |
5df83111 QL |
1275 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
1276 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
161e773c RW |
1277 | if (co->index < 0 || co->index >= SIRFSOC_UART_NR) |
1278 | return -EINVAL; | |
1279 | ||
1280 | if (!port->mapbase) | |
1281 | return -ENODEV; | |
1282 | ||
5df83111 QL |
1283 | /* enable usp in mode1 register */ |
1284 | if (sirfport->uart_reg->uart_type == SIRF_USP_UART) | |
1285 | wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN | | |
1286 | SIRFSOC_USP_ENDIAN_CTRL_LSBF); | |
161e773c RW |
1287 | if (options) |
1288 | uart_parse_options(options, &baud, &parity, &bits, &flow); | |
1289 | port->cons = co; | |
5df83111 | 1290 | |
8316d04c QL |
1291 | /* default console tx/rx transfer using io mode */ |
1292 | sirfport->rx_dma_no = UNVALID_DMA_CHAN; | |
1293 | sirfport->tx_dma_no = UNVALID_DMA_CHAN; | |
161e773c RW |
1294 | return uart_set_options(port, co, baud, parity, bits, flow); |
1295 | } | |
1296 | ||
1297 | static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch) | |
1298 | { | |
5df83111 QL |
1299 | struct sirfsoc_uart_port *sirfport = to_sirfport(port); |
1300 | struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg; | |
1301 | struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status; | |
161e773c | 1302 | while (rd_regl(port, |
5df83111 | 1303 | ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line)) |
161e773c | 1304 | cpu_relax(); |
5df83111 | 1305 | wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch); |
161e773c RW |
1306 | } |
1307 | ||
1308 | static void sirfsoc_uart_console_write(struct console *co, const char *s, | |
1309 | unsigned int count) | |
1310 | { | |
1311 | struct uart_port *port = &sirfsoc_uart_ports[co->index].port; | |
1312 | uart_console_write(port, s, count, sirfsoc_uart_console_putchar); | |
1313 | } | |
1314 | ||
1315 | static struct console sirfsoc_uart_console = { | |
1316 | .name = SIRFSOC_UART_NAME, | |
1317 | .device = uart_console_device, | |
1318 | .flags = CON_PRINTBUFFER, | |
1319 | .index = -1, | |
1320 | .write = sirfsoc_uart_console_write, | |
1321 | .setup = sirfsoc_uart_console_setup, | |
1322 | .data = &sirfsoc_uart_drv, | |
1323 | }; | |
1324 | ||
1325 | static int __init sirfsoc_uart_console_init(void) | |
1326 | { | |
1327 | register_console(&sirfsoc_uart_console); | |
1328 | return 0; | |
1329 | } | |
1330 | console_initcall(sirfsoc_uart_console_init); | |
1331 | #endif | |
1332 | ||
1333 | static struct uart_driver sirfsoc_uart_drv = { | |
1334 | .owner = THIS_MODULE, | |
1335 | .driver_name = SIRFUART_PORT_NAME, | |
1336 | .nr = SIRFSOC_UART_NR, | |
1337 | .dev_name = SIRFSOC_UART_NAME, | |
1338 | .major = SIRFSOC_UART_MAJOR, | |
1339 | .minor = SIRFSOC_UART_MINOR, | |
1340 | #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE | |
1341 | .cons = &sirfsoc_uart_console, | |
1342 | #else | |
1343 | .cons = NULL, | |
1344 | #endif | |
1345 | }; | |
1346 | ||
5df83111 QL |
1347 | static struct of_device_id sirfsoc_uart_ids[] = { |
1348 | { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,}, | |
1349 | { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart}, | |
1350 | { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp}, | |
1351 | {} | |
1352 | }; | |
1353 | MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids); | |
1354 | ||
ada1f443 | 1355 | static int sirfsoc_uart_probe(struct platform_device *pdev) |
161e773c RW |
1356 | { |
1357 | struct sirfsoc_uart_port *sirfport; | |
1358 | struct uart_port *port; | |
1359 | struct resource *res; | |
1360 | int ret; | |
5df83111 | 1361 | const struct of_device_id *match; |
161e773c | 1362 | |
5df83111 | 1363 | match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node); |
161e773c RW |
1364 | if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) { |
1365 | dev_err(&pdev->dev, | |
1366 | "Unable to find cell-index in uart node.\n"); | |
1367 | ret = -EFAULT; | |
1368 | goto err; | |
1369 | } | |
5df83111 QL |
1370 | if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) |
1371 | pdev->id += ((struct sirfsoc_uart_register *) | |
1372 | match->data)->uart_param.register_uart_nr; | |
161e773c RW |
1373 | sirfport = &sirfsoc_uart_ports[pdev->id]; |
1374 | port = &sirfport->port; | |
1375 | port->dev = &pdev->dev; | |
1376 | port->private_data = sirfport; | |
5df83111 | 1377 | sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data; |
161e773c | 1378 | |
2eb5618d QL |
1379 | sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node, |
1380 | "sirf,uart-has-rtscts"); | |
8316d04c | 1381 | if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) { |
5df83111 | 1382 | sirfport->uart_reg->uart_type = SIRF_REAL_UART; |
8316d04c QL |
1383 | if (of_property_read_u32(pdev->dev.of_node, |
1384 | "sirf,uart-dma-rx-channel", | |
1385 | &sirfport->rx_dma_no)) | |
1386 | sirfport->rx_dma_no = UNVALID_DMA_CHAN; | |
1387 | if (of_property_read_u32(pdev->dev.of_node, | |
1388 | "sirf,uart-dma-tx-channel", | |
1389 | &sirfport->tx_dma_no)) | |
1390 | sirfport->tx_dma_no = UNVALID_DMA_CHAN; | |
1391 | } | |
2eb5618d | 1392 | if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) { |
5df83111 | 1393 | sirfport->uart_reg->uart_type = SIRF_USP_UART; |
8316d04c QL |
1394 | if (of_property_read_u32(pdev->dev.of_node, |
1395 | "sirf,usp-dma-rx-channel", | |
1396 | &sirfport->rx_dma_no)) | |
1397 | sirfport->rx_dma_no = UNVALID_DMA_CHAN; | |
1398 | if (of_property_read_u32(pdev->dev.of_node, | |
1399 | "sirf,usp-dma-tx-channel", | |
1400 | &sirfport->tx_dma_no)) | |
1401 | sirfport->tx_dma_no = UNVALID_DMA_CHAN; | |
2eb5618d QL |
1402 | if (!sirfport->hw_flow_ctrl) |
1403 | goto usp_no_flow_control; | |
1404 | if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL)) | |
1405 | sirfport->cts_gpio = of_get_named_gpio( | |
1406 | pdev->dev.of_node, "cts-gpios", 0); | |
1407 | else | |
1408 | sirfport->cts_gpio = -1; | |
1409 | if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL)) | |
1410 | sirfport->rts_gpio = of_get_named_gpio( | |
1411 | pdev->dev.of_node, "rts-gpios", 0); | |
1412 | else | |
1413 | sirfport->rts_gpio = -1; | |
1414 | ||
1415 | if ((!gpio_is_valid(sirfport->cts_gpio) || | |
1416 | !gpio_is_valid(sirfport->rts_gpio))) { | |
1417 | ret = -EINVAL; | |
1418 | dev_err(&pdev->dev, | |
67bc306c | 1419 | "Usp flow control must have cts and rts gpio"); |
2eb5618d QL |
1420 | goto err; |
1421 | } | |
1422 | ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio, | |
67bc306c | 1423 | "usp-cts-gpio"); |
2eb5618d | 1424 | if (ret) { |
67bc306c | 1425 | dev_err(&pdev->dev, "Unable request cts gpio"); |
2eb5618d QL |
1426 | goto err; |
1427 | } | |
1428 | gpio_direction_input(sirfport->cts_gpio); | |
1429 | ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio, | |
67bc306c | 1430 | "usp-rts-gpio"); |
2eb5618d | 1431 | if (ret) { |
67bc306c | 1432 | dev_err(&pdev->dev, "Unable request rts gpio"); |
2eb5618d QL |
1433 | goto err; |
1434 | } | |
1435 | gpio_direction_output(sirfport->rts_gpio, 1); | |
1436 | } | |
1437 | usp_no_flow_control: | |
909102db BS |
1438 | if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart")) |
1439 | sirfport->is_marco = true; | |
1440 | ||
161e773c RW |
1441 | if (of_property_read_u32(pdev->dev.of_node, |
1442 | "fifosize", | |
1443 | &port->fifosize)) { | |
1444 | dev_err(&pdev->dev, | |
1445 | "Unable to find fifosize in uart node.\n"); | |
1446 | ret = -EFAULT; | |
1447 | goto err; | |
1448 | } | |
1449 | ||
1450 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
1451 | if (res == NULL) { | |
1452 | dev_err(&pdev->dev, "Insufficient resources.\n"); | |
1453 | ret = -EFAULT; | |
1454 | goto err; | |
1455 | } | |
8316d04c QL |
1456 | spin_lock_init(&sirfport->rx_lock); |
1457 | spin_lock_init(&sirfport->tx_lock); | |
1458 | tasklet_init(&sirfport->rx_dma_complete_tasklet, | |
1459 | sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport); | |
1460 | tasklet_init(&sirfport->rx_tmo_process_tasklet, | |
1461 | sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport); | |
161e773c RW |
1462 | port->mapbase = res->start; |
1463 | port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); | |
1464 | if (!port->membase) { | |
1465 | dev_err(&pdev->dev, "Cannot remap resource.\n"); | |
1466 | ret = -ENOMEM; | |
1467 | goto err; | |
1468 | } | |
1469 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
1470 | if (res == NULL) { | |
1471 | dev_err(&pdev->dev, "Insufficient resources.\n"); | |
1472 | ret = -EFAULT; | |
9250dd57 | 1473 | goto err; |
161e773c RW |
1474 | } |
1475 | port->irq = res->start; | |
1476 | ||
ac4ce718 BS |
1477 | sirfport->clk = clk_get(&pdev->dev, NULL); |
1478 | if (IS_ERR(sirfport->clk)) { | |
1479 | ret = PTR_ERR(sirfport->clk); | |
a343756e | 1480 | goto err; |
ac4ce718 BS |
1481 | } |
1482 | clk_prepare_enable(sirfport->clk); | |
1483 | port->uartclk = clk_get_rate(sirfport->clk); | |
1484 | ||
161e773c RW |
1485 | port->ops = &sirfsoc_uart_ops; |
1486 | spin_lock_init(&port->lock); | |
1487 | ||
1488 | platform_set_drvdata(pdev, sirfport); | |
1489 | ret = uart_add_one_port(&sirfsoc_uart_drv, port); | |
1490 | if (ret != 0) { | |
1491 | dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id); | |
1492 | goto port_err; | |
1493 | } | |
1494 | ||
1495 | return 0; | |
1496 | ||
1497 | port_err: | |
ac4ce718 BS |
1498 | clk_disable_unprepare(sirfport->clk); |
1499 | clk_put(sirfport->clk); | |
161e773c RW |
1500 | err: |
1501 | return ret; | |
1502 | } | |
1503 | ||
1504 | static int sirfsoc_uart_remove(struct platform_device *pdev) | |
1505 | { | |
1506 | struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev); | |
1507 | struct uart_port *port = &sirfport->port; | |
ac4ce718 BS |
1508 | clk_disable_unprepare(sirfport->clk); |
1509 | clk_put(sirfport->clk); | |
161e773c RW |
1510 | uart_remove_one_port(&sirfsoc_uart_drv, port); |
1511 | return 0; | |
1512 | } | |
1513 | ||
1514 | static int | |
1515 | sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state) | |
1516 | { | |
1517 | struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev); | |
1518 | struct uart_port *port = &sirfport->port; | |
1519 | uart_suspend_port(&sirfsoc_uart_drv, port); | |
1520 | return 0; | |
1521 | } | |
1522 | ||
1523 | static int sirfsoc_uart_resume(struct platform_device *pdev) | |
1524 | { | |
1525 | struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev); | |
1526 | struct uart_port *port = &sirfport->port; | |
1527 | uart_resume_port(&sirfsoc_uart_drv, port); | |
1528 | return 0; | |
1529 | } | |
1530 | ||
161e773c RW |
/*
 * Platform driver glue: binds the SiRF UART ops above to devices matched
 * via the OF table (sirfsoc_uart_ids, declared elsewhere in this file).
 * Uses the legacy platform suspend/resume callbacks rather than dev_pm_ops.
 */
static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.suspend	= sirfsoc_uart_suspend,
	.resume		= sirfsoc_uart_resume,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= sirfsoc_uart_ids,
	},
};
1542 | ||
1543 | static int __init sirfsoc_uart_init(void) | |
1544 | { | |
1545 | int ret = 0; | |
1546 | ||
1547 | ret = uart_register_driver(&sirfsoc_uart_drv); | |
1548 | if (ret) | |
1549 | goto out; | |
1550 | ||
1551 | ret = platform_driver_register(&sirfsoc_uart_driver); | |
1552 | if (ret) | |
1553 | uart_unregister_driver(&sirfsoc_uart_drv); | |
1554 | out: | |
1555 | return ret; | |
1556 | } | |
1557 | module_init(sirfsoc_uart_init); | |
1558 | ||
/*
 * sirfsoc_uart_exit - module exit point
 *
 * Undoes sirfsoc_uart_init() in reverse order: the platform driver is
 * removed first (detaching all bound ports), then the uart_driver is
 * unregistered from the serial core.
 */
static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
module_exit(sirfsoc_uart_exit);
1565 | ||
1566 | MODULE_LICENSE("GPL v2"); | |
1567 | MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>"); | |
1568 | MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver"); |