/*
 * Driver for AMBA serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Copyright 1999 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This is a generic driver for ARM AMBA-type serial ports.  They
 * have a lot of 16550-like features, but are not register compatible.
 * Note that although they do have CTS, DCD and DSR inputs, they do
 * not have an RI input, nor do they have DTR or RTS outputs.  If
 * required, these have to be supplied via some other means (eg, GPIO)
 * and hooked into this driver.
 */
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/acpi.h>

#include "amba-pl011.h"
#define UART_NR			14

#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

#define AMBA_ISR_PASS_LIMIT	256

#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX	(1 << 16)
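
/*
 * UART_DUMMY_DR_RX lies above the 16-bit data/error field of the DR
 * register; pl011_fifo_to_tty() ORs it into every received character, so
 * setting it in ignore_status_mask (see pl011_setup_status_masks() for the
 * CREAD-off case) makes every received character ignorable.
 */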
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
};
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	const u16		*reg_offset;
	unsigned int		ifls;
	unsigned int		fr_busy;
	unsigned int		fr_dsr;
	unsigned int		fr_cts;
	unsigned int		fr_ri;
	unsigned int		inv_fr;
	bool			access_32b;
	bool			oversampling;
	bool			dma_threshold;
	bool			cts_event_workaround;
	bool			always_enabled;
	bool			fixed_options;

	unsigned int (*get_fifosize)(struct amba_device *dev);
};
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	return amba_rev(dev) < 3 ? 16 : 32;
}
static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};
static struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.access_32b		= true,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
/*
 * Erratum 44 for QDF2432v1 and QDF2400v1 SoCs describes the BUSY bit as
 * occasionally getting stuck as 1. To avoid the potential for a hang, check
 * TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
 * implementations, so only do so if an affected platform is detected in
 * parse_spcr().
 */
static bool qdf2400_e44_present = false;
static struct vendor_data vendor_qdt_qdf2400_e44 = {
	.reg_offset		= pl011_std_offsets,
	.fr_busy		= UART011_FR_TXFE,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.inv_fr			= UART011_FR_TXFE,

	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	return 64;
}
static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fr_busy		= UART01x_FR_BUSY,
	.fr_dsr			= UART01x_FR_DSR,
	.fr_cts			= UART01x_FR_CTS,
	.fr_ri			= UART011_FR_RI,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};
static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = ZX_UART011_DR,
	[REG_FR] = ZX_UART011_FR,
	[REG_LCRH_RX] = ZX_UART011_LCRH,
	[REG_LCRH_TX] = ZX_UART011_LCRH,
	[REG_IBRD] = ZX_UART011_IBRD,
	[REG_FBRD] = ZX_UART011_FBRD,
	[REG_CR] = ZX_UART011_CR,
	[REG_IFLS] = ZX_UART011_IFLS,
	[REG_IMSC] = ZX_UART011_IMSC,
	[REG_RIS] = ZX_UART011_RIS,
	[REG_MIS] = ZX_UART011_MIS,
	[REG_ICR] = ZX_UART011_ICR,
	[REG_DMACR] = ZX_UART011_DMACR,
};
static unsigned int get_fifosize_zte(struct amba_device *dev)
{
	return 16;
}
static struct vendor_data vendor_zte = {
	.reg_offset		= pl011_zte_offsets,
	.access_32b		= true,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fr_busy		= ZX_UART01x_FR_BUSY,
	.fr_dsr			= ZX_UART01x_FR_DSR,
	.fr_cts			= ZX_UART01x_FR_CTS,
	.fr_ri			= ZX_UART011_FR_RI,
	.get_fifosize		= get_fifosize_zte,
};
/* Deals with DMA transactions */
struct pl011_sgbuf {
	struct scatterlist sg;
	char *buf;
};

struct pl011_dmarx_data {
	struct dma_chan		*chan;
	struct completion	complete;
	bool			use_buf_b;
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;
	bool			running;
	struct timer_list	timer;
	unsigned int last_residue;
	unsigned long last_jiffies;
	bool auto_poll_rate;
	unsigned int poll_rate;
	unsigned int poll_timeout;
};

struct pl011_dmatx_data {
	struct dma_chan		*chan;
	struct scatterlist	sg;
	char			*buf;
	bool			queued;
};
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;
	struct clk		*clk;
	const struct vendor_data *vendor;
	unsigned int		dmacr;		/* dma control reg */
	unsigned int		im;		/* interrupt mask */
	unsigned int		old_status;
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;
	bool			using_rx_dma;
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;
#endif
};
static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
	unsigned int reg)
{
	return uap->reg_offset[reg];
}
static unsigned int pl011_read(const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	return (uap->port.iotype == UPIO_MEM32) ?
		readl_relaxed(addr) : readw_relaxed(addr);
}
static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
	unsigned int reg)
{
	void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg);

	if (uap->port.iotype == UPIO_MEM32)
		writel_relaxed(val, addr);
	else
		writew_relaxed(val, addr);
}
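
/*
 * All register access goes through the two accessors above so that the
 * per-vendor offset table and bus width are honoured.  For example, a
 * read-modify-write of the interrupt mask, as done throughout this driver,
 * is written as:
 *
 *	uap->im = pl011_read(uap, REG_IMSC) | UART011_RXIM;
 *	pl011_write(uap->im, uap, REG_IMSC);
 */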
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE

#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	sg->buf = dma_alloc_coherent(chan->device->dev,
		PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
	if (!sg->buf)
		return -ENOMEM;

	sg_init_table(&sg->sg, 1);
	sg_set_page(&sg->sg, phys_to_page(dma_addr),
		PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
	sg_dma_address(&sg->sg) = dma_addr;
	sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;

	return 0;
}
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
	enum dma_data_direction dir)
{
	if (sg->buf) {
		dma_free_coherent(chan->device->dev,
			PL011_DMA_BUFFER_SIZE, sg->buf,
			sg_dma_address(&sg->sg));
	}
}
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	/* DMA is the sole user of the platform data right now */
	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
	struct device *dev = uap->port.dev;
	struct dma_slave_config tx_conf = {
		.dst_addr = uap->port.mapbase +
				 pl011_reg_to_offset(uap, REG_DR),
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.direction = DMA_MEM_TO_DEV,
		.dst_maxburst = uap->fifosize >> 1,
		.device_fc = false,
	};
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	uap->dma_probed = true;
	chan = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(chan)) {
		if (PTR_ERR(chan) == -EPROBE_DEFER) {
			uap->dma_probed = false;
			return;
		}

		/* We need platform data */
		if (!plat || !plat->dma_filter) {
			dev_info(uap->port.dev, "no DMA platform data\n");
			return;
		}

		/* Try to acquire a generic DMA engine slave TX channel */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = dma_request_channel(mask, plat->dma_filter,
						plat->dma_tx_param);
		if (!chan) {
			dev_err(uap->port.dev, "no TX DMA channel!\n");
			return;
		}
	}

	dmaengine_slave_config(chan, &tx_conf);
	uap->dmatx.chan = chan;

	dev_info(uap->port.dev, "DMA channel TX %s\n",
		 dma_chan_name(uap->dmatx.chan));

	/* Optionally make use of an RX channel as well */
	chan = dma_request_slave_channel(dev, "rx");

	if (!chan && plat && plat->dma_rx_param) {
		chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);

		if (!chan) {
			dev_err(uap->port.dev, "no RX DMA channel!\n");
			return;
		}
	}

	if (chan) {
		struct dma_slave_config rx_conf = {
			.src_addr = uap->port.mapbase +
				pl011_reg_to_offset(uap, REG_DR),
			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.direction = DMA_DEV_TO_MEM,
			.src_maxburst = uap->fifosize >> 2,
			.device_fc = false,
		};
		struct dma_slave_caps caps;

		/*
		 * Some DMA controllers provide information on their capabilities.
		 * If the controller does, check for suitable residue processing,
		 * otherwise assume all is well.
		 */
		if (0 == dma_get_slave_caps(chan, &caps)) {
			if (caps.residue_granularity ==
					DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
				dma_release_channel(chan);
				dev_info(uap->port.dev,
					"RX DMA disabled - no residue processing\n");
				return;
			}
		}
		dmaengine_slave_config(chan, &rx_conf);
		uap->dmarx.chan = chan;

		uap->dmarx.auto_poll_rate = false;
		if (plat && plat->dma_rx_poll_enable) {
			/* Set poll rate if specified. */
			if (plat->dma_rx_poll_rate) {
				uap->dmarx.auto_poll_rate = false;
				uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
			} else {
				/*
				 * The poll rate defaults to 100 ms if not
				 * specified.  This will be adjusted with
				 * the baud rate at set_termios.
				 */
				uap->dmarx.auto_poll_rate = true;
				uap->dmarx.poll_rate = 100;
			}
			/* poll_timeout defaults to 3 seconds if not specified. */
			if (plat->dma_rx_poll_timeout)
				uap->dmarx.poll_timeout =
					plat->dma_rx_poll_timeout;
			else
				uap->dmarx.poll_timeout = 3000;
		} else if (!plat && dev->of_node) {
			uap->dmarx.auto_poll_rate = of_property_read_bool(
						dev->of_node, "auto-poll");
			if (uap->dmarx.auto_poll_rate) {
				u32 x;

				if (0 == of_property_read_u32(dev->of_node,
					"poll-rate-ms", &x))
					uap->dmarx.poll_rate = x;
				else
					uap->dmarx.poll_rate = 100;
				if (0 == of_property_read_u32(dev->of_node,
					"poll-timeout-ms", &x))
					uap->dmarx.poll_timeout = x;
				else
					uap->dmarx.poll_timeout = 3000;
			}
		}
		dev_info(uap->port.dev, "DMA channel RX %s\n",
			 dma_chan_name(uap->dmarx.chan));
	}
}
static void pl011_dma_remove(struct uart_amba_port *uap)
{
	if (uap->dmatx.chan)
		dma_release_channel(uap->dmatx.chan);
	if (uap->dmarx.chan)
		dma_release_channel(uap->dmarx.chan);
}
/* Forward declare these for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
static void pl011_start_tx_pio(struct uart_amba_port *uap);
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer is completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0)
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		pl011_start_tx_pio(uap);

	spin_unlock_irqrestore(&uap->port.lock, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}
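
	/*
	 * Worked example of the wrap-around copy above (values illustrative):
	 * with UART_XMIT_SIZE = 4096, xmit->tail = 4000 and count = 200, the
	 * first memcpy takes the 96 bytes up to the end of the circular
	 * buffer and the second takes the remaining 104 bytes from its start.
	 */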
	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	if (uap->dmatx.queued) {
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
static void pl011_dma_rx_callback(void *data);

static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred. This must be called
 * with the port spinlock uap->port.lock held.
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/* The data can be taken by polling */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remaining data from the DMA */
	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_string() tries to take as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = false;

	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev,
			"could not retrigger RX DMA job, fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/*
 * Timer handler for Rx DMA polling.
 * On every polling interval it checks the residue in the DMA buffer and
 * transfers data to the tty. Also, last_residue is updated for the next
 * polling.
 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {
		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants have some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev,
				"could not trigger initial RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
		cpu_relax();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return uap->using_rx_dma;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

#define pl011_dma_flush_buffer	NULL

#endif
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~UART011_TXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_dma_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);

/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
	if (pl011_tx_chars(uap, false)) {
		uap->im |= UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}

static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!pl011_dma_tx_start(uap))
		pl011_start_tx_pio(uap);
}
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
	pl011_write(uap->im, uap, REG_IMSC);
}
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev,
				"could not trigger RX DMA job, fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
			  bool from_irq)
{
	if (unlikely(!from_irq) &&
	    pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		return false; /* unable to transmit character */

	pl011_write(c, uap, REG_DR);
	uap->port.icount.tx++;

	return true;
}
/* Returns true if tx interrupts have to be (kept) enabled */
static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return true;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return false;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return true;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit)) {
		pl011_stop_tx(&uap->port);
		return false;
	}
	return true;
}
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & uap->vendor->fr_dsr)
		uap->port.icount.dsr++;

	if (delta & uap->vendor->fr_cts)
		uart_handle_cts_change(&uap->port,
				       status & uap->vendor->fr_cts);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	u16 imsc;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	imsc = pl011_read(uap, REG_IMSC);
	status = pl011_read(uap, REG_RIS) & imsc;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & imsc;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
static unsigned int pl011_tx_empty(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Allow feature register bits to be inverted to work around errata */
	unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr;

	return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ?
							0 : TIOCSER_TEMT;
}
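
/*
 * For example, on the QDF2400 erratum-44 variant above, fr_busy and inv_fr
 * are both UART011_FR_TXFE, so the XOR turns "TXFE clear" into a set busy
 * bit and the port reports empty only once the TX FIFO is truly empty.
 */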
static unsigned int pl011_get_mctrl(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int result = 0;
	unsigned int status = pl011_read(uap, REG_FR);

#define TIOCMBIT(uartbit, tiocmbit)	\
	if (status & uartbit)		\
		result |= tiocmbit
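
	/*
	 * For illustration, TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR) below expands
	 * to "if (status & UART01x_FR_DCD) result |= TIOCM_CAR;".
	 */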
	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
	TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR);
	TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS);
	TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG);
#undef TIOCMBIT
	return result;
}
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

#define	TIOCMBIT(tiocmbit, uartbit)	\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = pl011_read(uap, REG_LCRH_TX);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	pl011_write(lcr_h, uap, REG_LCRH_TX);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
#ifdef CONFIG_CONSOLE_POLL

static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
static int pl011_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int status;

	/*
	 * The caller might need IRQs lowered, e.g. if used with KDB NMI
	 * debugger.
	 */
	pl011_quiesce_irqs(port);

	status = pl011_read(uap, REG_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return pl011_read(uap, REG_DR);
}
static void pl011_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();

	pl011_write(ch, uap, REG_DR);
}

#endif /* CONFIG_CONSOLE_POLL */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save the interrupt enable mask, and enable RX interrupts in case
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
static bool pl011_split_lcrh(const struct uart_amba_port *uap)
{
	return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
	       pl011_reg_to_offset(uap, REG_LCRH_TX);
}
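
/*
 * With pl011_std_offsets both REG_LCRH_RX and REG_LCRH_TX map to
 * UART011_LCRH, so pl011_split_lcrh() returns false; the ST variant maps
 * them to ST_UART011_LCRH_RX and ST_UART011_LCRH_TX, so it returns true.
 */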
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing the LCRH_TX register;
		 * to get this delay, write the read-only MIS register
		 * 10 times.
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
	pl011_write(uap->im, uap, REG_IMSC);

	return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}
/*
 * Enable interrupts; only the RX timeout interrupt when using DMA.
 * If the initial RX DMA job failed, start in interrupt mode as well.
 */
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* Clear out any spuriously appearing RX interrupts */
	pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);
	spin_unlock_irq(&uap->port.lock);
}
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		goto clk_dis;

	retval = pl011_allocate_irq(uap);
	if (retval)
		goto clk_dis;

	pl011_write(uap->vendor->ifls, uap, REG_IFLS);

	spin_lock_irq(&uap->port.lock);

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);

	spin_unlock_irq(&uap->port.lock);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	pl011_enable_interrupts(uap);

	return 0;

 clk_dis:
	clk_disable_unprepare(uap->clk);
	return retval;
}
static int sbsa_uart_startup(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	retval = pl011_hwinit(port);
	if (retval)
		return retval;

	retval = pl011_allocate_irq(uap);
	if (retval)
		return retval;

	/* The SBSA UART does not support any modem status lines. */
	uap->old_status = 0;

	pl011_enable_interrupts(uap);

	return 0;
}
static void pl011_shutdown_channel(struct uart_amba_port *uap,
					unsigned int lcrh)
{
	unsigned long val;

	val = pl011_read(uap, lcrh);
	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
	pl011_write(val, uap, lcrh);
}
/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->autorts = false;
	spin_lock_irq(&uap->port.lock);
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
	spin_lock_irq(&uap->port.lock);

	/* mask all interrupts and clear all pending ones */
	uap->im = 0;
	pl011_write(uap->im, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	spin_unlock_irq(&uap->port.lock);
}
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void sbsa_uart_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
		container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	free_irq(uap->port.irq, uap);

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif
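
	/*
	 * For example, at 115200 baud the auto-selected poll rate above works
	 * out to DIV_ROUND_UP(10000000, 115200) = 87 ms.
	 */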
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);
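
	/*
	 * quot holds the divisor in units of 1/64.  For example, with a
	 * 24 MHz uartclk at 115200 baud, quot = DIV_ROUND_CLOSEST(24000000 *
	 * 4, 115200) = 833, which is split further below into IBRD =
	 * 833 >> 6 = 13 and FBRD = 833 & 0x3f = 1.
	 */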
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: // CS8
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
		if (termios->c_cflag & CMSPAR)
			lcr_h |= UART011_LCRH_SPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;
	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}
	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		      struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned long flags;

	tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);

	/* The SBSA UART only supports 8n1 without hardware flow control. */
	termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
	termios->c_cflag &= ~(CMSPAR | CRTSCTS);
	termios->c_cflag |= CS8 | CLOCAL;

	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, CS8, uap->fixed_baud);
	pl011_setup_status_masks(port, termios);
	spin_unlock_irqrestore(&port->lock, flags);
}
static const char *pl011_type(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	return uap->port.type == PORT_AMBA ? uap->type : NULL;
}
/*
 * Release the memory region(s) being used by 'port'
 */
static void pl011_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}

/*
 * Request the memory region(s) being used by 'port'
 */
static int pl011_request_port(struct uart_port *port)
{
	return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
			!= NULL ? 0 : -EBUSY;
}
/*
 * Configure/autoconfigure the port.
 */
static void pl011_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = PORT_AMBA;
		pl011_request_port(port);
	}
}
/*
 * verify the new serial_struct (for TIOCSSERIAL).
 */
static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	int ret = 0;
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		ret = -EINVAL;
	if (ser->irq < 0 || ser->irq >= nr_irqs)
		ret = -EINVAL;
	if (ser->baud_base < 9600)
		ret = -EINVAL;
	return ret;
}
static const struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init     = pl011_hwinit,
	.poll_get_char = pl011_get_poll_char,
	.poll_put_char = pl011_put_poll_char,
#endif
};
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}

static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init     = pl011_hwinit,
	.poll_get_char = pl011_get_poll_char,
	.poll_put_char = pl011_put_poll_char,
#endif
};
static struct uart_amba_port *amba_ports[UART_NR];

#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
		cpu_relax();
	pl011_write(ch, uap, REG_DR);
}
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for the transmitter to become empty and restore the
	 * TCR. Allow feature register bits to be inverted to work around
	 * errata.
	 */
	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
						& uap->vendor->fr_busy)
		cpu_relax();
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (pl011_read(uap, REG_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
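
/*
 * A worked example of the baud recovery above, using assumed register
 * values rather than ones read from real hardware: with a 24 MHz uartclk
 * programmed for 115200 baud, IBRD = 13 and FBRD = 1, so
 *
 *	baud = 24000000 * 4 / (64 * 13 + 1) = 96000000 / 833 ~= 115246
 *
 * This is just uartclk / (16 * (IBRD + FBRD/64)) rewritten to stay in
 * integer arithmetic; the serial core then snaps the result back to the
 * nearest standard rate.
 */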
static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
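
/*
 * For illustration (a hypothetical boot argument, not taken from this
 * file): booting with "console=ttyAMA0,115200n8" reaches the setup above
 * with options = "115200n8", which uart_parse_options() splits into
 * baud = 115200, parity = 'n', bits = 8 and flow = 'n'. With no options
 * string, the current settings are instead read back from the hardware
 * via pl011_console_get_options().
 */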
/**
 *	pl011_console_match - non-standard console matching
 *	@co:	  registering console
 *	@name:	  name from console command line
 *	@idx:	  index from console command line
 *	@options: ptr to option string from console command line
 *
 *	Only attempts to match console command lines of the form:
 *	    console=pl011,mmio|mmio32,<addr>[,<options>]
 *	    console=pl011,0x<addr>[,<options>]
 *	This form is used to register an initial earlycon boot console and
 *	replace it with the amba_console at pl011 driver init.
 *
 *	Performs console setup for a match (as required by interface)
 *	If no <options> are specified, then assume the h/w is already setup.
 *
 *	Returns 0 if console matches; otherwise non-zero to use default matching
 */
static int __init pl011_console_match(struct console *co, char *name, int idx,
				      char *options)
{
	unsigned char iotype;
	resource_size_t addr;
	int i;

	if (strcmp(name, "qdf2400_e44") == 0) {
		pr_info_once("UART: Working around QDF2400 SoC erratum 44");
		qdf2400_e44_present = true;
	} else if (strcmp(name, "pl011") != 0) {
		return -ENODEV;
	}

	if (uart_parse_earlycon(options, &iotype, &addr, &options))
		return -ENODEV;

	if (iotype != UPIO_MEM && iotype != UPIO_MEM32)
		return -ENODEV;

	/* try to match the port specified on the command line */
	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		struct uart_port *port;

		if (!amba_ports[i])
			continue;

		port = &amba_ports[i]->port;

		if (port->mapbase != addr)
			continue;

		co->index = i;
		port->cons = co;
		return pl011_console_setup(co, options);
	}

	return -ENODEV;
}
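
/*
 * A concrete instance of the matching above (the address is made up for
 * the example): "console=pl011,mmio32,0x9000000,115200n8" parses to
 * UPIO_MEM32 and addr = 0x9000000; if a registered port's mapbase equals
 * that address, the console is attached to it and pl011_console_setup()
 * runs with "115200n8".
 */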
static struct uart_driver amba_reg;
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.match		= pl011_console_match,
	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
	.index		= -1,
	.data		= &amba_reg,
};

#define AMBA_CONSOLE	(&amba_console)
static void qdf2400_e44_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	writel(c, port->membase + UART01x_DR);
	/*
	 * Erratum 44 work-around: the BUSY flag can get stuck, so wait
	 * for the TX FIFO to drain (TXFE) after each character instead.
	 */
	while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE))
		cpu_relax();
}

static void qdf2400_e44_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, qdf2400_e44_putc);
}
static void pl011_putc(struct uart_port *port, int c)
{
	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
		cpu_relax();
	if (port->iotype == UPIO_MEM32)
		writel(c, port->membase + UART01x_DR);
	else
		writeb(c, port->membase + UART01x_DR);
	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
		cpu_relax();
}

static void pl011_early_write(struct console *con, const char *s, unsigned n)
{
	struct earlycon_device *dev = con->data;

	uart_console_write(&dev->port, s, n, pl011_putc);
}
/*
 * On non-ACPI systems, earlycon is enabled by specifying
 * "earlycon=pl011,<address>" on the kernel command line.
 *
 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
 * by specifying only "earlycon" on the command line.  Because it requires
 * SPCR, the console starts after ACPI is parsed, which is later than a
 * traditional early console.
 *
 * To get the traditional early console that starts before ACPI is parsed,
 * specify the full "earlycon=pl011,<address>" option.
 */
static int __init pl011_early_console_setup(struct earlycon_device *device,
					    const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = pl011_early_write;

	return 0;
}
OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
/*
 * On Qualcomm Datacenter Technologies QDF2400 SOCs affected by
 * Erratum 44, traditional earlycon can be enabled by specifying
 * "earlycon=qdf2400_e44,<address>".  Any options are ignored.
 *
 * Alternatively, you can just specify "earlycon", and the early console
 * will be enabled with the information from the SPCR table.  In this
 * case, the SPCR code will detect the need for the E44 work-around,
 * and set the console name to "qdf2400_e44".
 */
static int __init
qdf2400_e44_early_console_setup(struct earlycon_device *device,
				const char *opt)
{
	if (!device->port.membase)
		return -ENODEV;

	device->con->write = qdf2400_e44_early_write;
	return 0;
}
EARLYCON_DECLARE(qdf2400_e44, qdf2400_e44_early_console_setup);

#else
#define AMBA_CONSOLE	NULL
#endif
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
static int pl011_probe_dt_alias(int index, struct device *dev)
{
	struct device_node *np;
	static bool seen_dev_with_alias = false;
	static bool seen_dev_without_alias = false;
	int ret = index;

	if (!IS_ENABLED(CONFIG_OF))
		return ret;

	np = dev->of_node;
	if (!np)
		return ret;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		seen_dev_without_alias = true;
		ret = index;
	} else {
		seen_dev_with_alias = true;
		if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
			dev_warn(dev, "requested serial port %d not available.\n", ret);
			ret = index;
		}
	}

	if (seen_dev_with_alias && seen_dev_without_alias)
		dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");

	return ret;
}
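
/*
 * Illustrative device tree fragment (hypothetical, not part of this
 * driver): pinning a UART to /dev/ttyAMA0 regardless of probe order is
 * done with a "serial" alias pointing at the UART node:
 *
 *	aliases {
 *		serial0 = &uart0;
 *	};
 *
 * of_alias_get_id() then returns 0 for that device, and the port is
 * registered at line 0 if that slot is still free.
 */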
/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
	int i;
	bool busy = false;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
		if (amba_ports[i] == uap)
			amba_ports[i] = NULL;
		else if (amba_ports[i])
			busy = true;
	}
	pl011_dma_remove(uap);
	if (!busy)
		uart_unregister_driver(&amba_reg);
}
static int pl011_find_free_port(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
		if (amba_ports[i] == NULL)
			return i;

	return -EBUSY;
}
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
			    struct resource *mmiobase, int index)
{
	void __iomem *base;

	base = devm_ioremap_resource(dev, mmiobase);
	if (IS_ERR(base))
		return PTR_ERR(base);

	index = pl011_probe_dt_alias(index, dev);

	uap->old_cr = 0;
	uap->port.dev = dev;
	uap->port.mapbase = mmiobase->start;
	uap->port.membase = base;
	uap->port.fifosize = uap->fifosize;
	uap->port.flags = UPF_BOOT_AUTOCONF;
	uap->port.line = index;

	amba_ports[index] = uap;

	return 0;
}
static int pl011_register_port(struct uart_amba_port *uap)
{
	int ret;

	/* Ensure interrupts from this UART are masked and cleared */
	pl011_write(0, uap, REG_IMSC);
	pl011_write(0xffff, uap, REG_ICR);

	if (!amba_reg.state) {
		ret = uart_register_driver(&amba_reg);
		if (ret < 0) {
			dev_err(uap->port.dev,
				"Failed to register AMBA-PL011 driver\n");
			return ret;
		}
	}

	ret = uart_add_one_port(&amba_reg, &uap->port);
	if (ret)
		pl011_unregister_port(uap);

	return ret;
}
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct uart_amba_port *uap;
	struct vendor_data *vendor = id->data;
	int portnr, ret;

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	uap->clk = devm_clk_get(&dev->dev, NULL);
	if (IS_ERR(uap->clk))
		return PTR_ERR(uap->clk);

	uap->reg_offset = vendor->reg_offset;
	uap->vendor = vendor;
	uap->fifosize = vendor->get_fifosize(dev);
	uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.irq = dev->irq[0];
	uap->port.ops = &amba_pl011_pops;

	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));

	ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
	if (ret)
		return ret;

	amba_set_drvdata(dev, uap);

	return pl011_register_port(uap);
}
static int pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int pl011_suspend(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_suspend_port(&amba_reg, &uap->port);
}

static int pl011_resume(struct device *dev)
{
	struct uart_amba_port *uap = dev_get_drvdata(dev);

	if (!uap)
		return -EINVAL;

	return uart_resume_port(&amba_reg, &uap->port);
}
#endif

static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
static int sbsa_uart_probe(struct platform_device *pdev)
{
	struct uart_amba_port *uap;
	struct resource *r;
	int portnr, ret;
	int baudrate;

	/*
	 * Check the mandatory baud rate parameter in the DT node early
	 * so that we can easily exit with the error.
	 */
	if (pdev->dev.of_node) {
		struct device_node *np = pdev->dev.of_node;

		ret = of_property_read_u32(np, "current-speed", &baudrate);
		if (ret)
			return ret;
	} else {
		baudrate = 115200;
	}

	portnr = pl011_find_free_port();
	if (portnr < 0)
		return portnr;

	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
			   GFP_KERNEL);
	if (!uap)
		return -ENOMEM;

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "cannot obtain irq\n");
		return ret;
	}
	uap->port.irq = ret;

	uap->reg_offset = vendor_sbsa.reg_offset;
	uap->vendor = qdf2400_e44_present ?
					&vendor_qdt_qdf2400_e44 : &vendor_sbsa;
	uap->fifosize = 32;
	uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
	uap->port.ops = &sbsa_uart_pops;
	uap->fixed_baud = baudrate;

	snprintf(uap->type, sizeof(uap->type), "SBSA");

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, uap);

	return pl011_register_port(uap);
}
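
/*
 * Illustrative SBSA UART device tree node (hypothetical values): the
 * "current-speed" property is mandatory because the SBSA generic UART
 * model gives the OS no access to the baud-rate divisors:
 *
 *	uart@9000000 {
 *		compatible = "arm,sbsa-uart";
 *		reg = <0x9000000 0x1000>;
 *		interrupts = <0 1 4>;
 *		current-speed = <115200>;
 *	};
 */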
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
	{ "ARMH0011", 0 },
	{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
static struct platform_driver arm_sbsa_uart_platform_driver = {
	.probe		= sbsa_uart_probe,
	.remove		= sbsa_uart_remove,
	.driver	= {
		.name	= "sbsa-uart",
		.of_match_table = of_match_ptr(sbsa_uart_of_match),
		.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
	},
};
static struct amba_id pl011_ids[] = {
	{
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{
		.id	= AMBA_LINUX_ID(0x00, 0x1, 0xffe),
		.mask	= 0x00ffffff,
		.data	= &vendor_zte,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
		.pm	= &pl011_dev_pm_ops,
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
};
static int __init pl011_init(void)
{
	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");

	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
		pr_warn("could not register SBSA UART platform driver\n");
	return amba_driver_register(&pl011_driver);
}

static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}
/*
 * While this can be a module, if built-in it's most likely the console,
 * so leave module_exit() in place but move module_init() to an earlier
 * initcall level.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);
MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");