drivers/tty/serial/8250/8250_dma.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"

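/*
 * __dma_tx_complete - TX DMA completion callback
 *
 * Runs when the dmaengine driver reports that the current TX transfer has
 * finished: it syncs the circular buffer back to the CPU, advances the tail
 * and the TX byte count, wakes up writers waiting for room, and then either
 * queues the next chunk via serial8250_tx_dma() or, if that fails, falls
 * back to the THRI interrupt so PIO can take over.
 */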
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct circ_buf		*xmit = &p->port.state->xmit;
	unsigned long	flags;
	int		ret;

	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	xmit->tail += dma->tx_size;
	xmit->tail &= UART_XMIT_SIZE - 1;
	p->port.icount.tx += dma->tx_size;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	ret = serial8250_tx_dma(p);
	if (ret) {
		p->ier |= UART_IER_THRI;
		serial_port_out(&p->port, UART_IER, p->ier);
	}

	spin_unlock_irqrestore(&p->port.lock, flags);
}

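/*
 * __dma_rx_complete - RX DMA completion handling
 *
 * Called from the dmaengine completion callback and directly from
 * serial8250_rx_dma_flush(). Marks RX as idle, uses the reported residue to
 * work out how many bytes actually arrived, copies them from the coherent
 * RX buffer into the tty flip buffer and pushes them up to the line
 * discipline.
 */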
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port	*p = param;
	struct uart_8250_dma	*dma = p->dma;
	struct tty_port		*tty_port = &p->port.state->port;
	struct dma_tx_state	state;
	int			count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}

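/*
 * serial8250_tx_dma - start a TX DMA transfer
 *
 * Submits a single transfer covering the data between the tail of the
 * circular transmit buffer and the end of the buffer (wrap-around is handled
 * by the next call from __dma_tx_complete()). Returns 0 if a transfer is
 * already running or there is nothing to send. If no descriptor can be
 * prepared, tx_err is set and -EBUSY is returned so the caller can fall back
 * to interrupt-driven PIO; once DMA works again, the THRI fallback is
 * switched off.
 */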
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct circ_buf			*xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor	*desc;
	int ret;

	if (dma->tx_running)
		return 0;

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		serial8250_rpm_put_tx(p);
		return 0;
	}

	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	if (dma->tx_err) {
		dma->tx_err = 0;
		if (p->ier & UART_IER_THRI) {
			p->ier &= ~UART_IER_THRI;
			serial_out(p, UART_IER, p->ier);
		}
	}
	return 0;
err:
	dma->tx_err = 1;
	return ret;
}

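/*
 * serial8250_rx_dma - start an RX DMA transfer
 *
 * Queues a single transfer of up to rx_size bytes from the device into the
 * coherent RX buffer; __dma_rx_complete() runs when it finishes. Returns 0
 * if a transfer is already in flight, or -EBUSY if no descriptor could be
 * prepared, in which case the caller is expected to fall back to PIO.
 */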
int serial8250_rx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma		*dma = p->dma;
	struct dma_async_tx_descriptor	*desc;

	if (dma->rx_running)
		return 0;

	desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
					   dma->rx_size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	dma->rx_running = 1;
	desc->callback = __dma_rx_complete;
	desc->callback_param = p;

	dma->rx_cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->rxchan);

	return 0;
}

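/*
 * serial8250_rx_dma_flush - reclaim data from an in-flight RX transfer
 *
 * If RX DMA is running, pause the channel, hand whatever has been received
 * so far to the tty layer via __dma_rx_complete(), and then terminate the
 * transfer. Callers typically use this to drain the DMA buffer before
 * reading any remaining characters out of the FIFO by PIO.
 */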
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);

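/*
 * serial8250_request_dma - acquire and set up DMA resources for a port
 *
 * Requests RX and TX slave channels (via the optional filter function and
 * parameters, or by the "rx"/"tx" names from the firmware node), checks that
 * the dmaengine driver provides the capabilities this code relies on,
 * applies the default slave configuration, allocates the coherent RX buffer
 * and maps the circular TX buffer for DMA. Returns 0 on success or a
 * negative errno, releasing anything already acquired on failure.
 *
 * Illustrative sketch only (the names below are made up, not part of this
 * file): a driver typically fills in a struct uart_8250_dma and points the
 * port at it before the port is started, e.g.
 *
 *	static struct uart_8250_dma my_dma = {
 *		.rx_size = PAGE_SIZE,
 *	};
 *	...
 *	up->dma = &my_dma;
 *
 * The 8250 core then normally calls serial8250_request_dma() for that port
 * during startup.
 */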
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma	*dma = p->dma;
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t		mask;
	struct dma_slave_caps	caps;
	int			ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction		= DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr		= rx_dma_addr + UART_RX;

	dma->txconf.direction		= DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr		= tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/*
	 * 8250 rx dma requires the dmaengine driver to support
	 * pause/terminate and to report residue at better than
	 * descriptor granularity.
	 */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					&dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
					p->port.state->xmit.buf,
					UART_XMIT_SIZE,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);

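/*
 * serial8250_release_dma - undo serial8250_request_dma()
 *
 * Terminates any outstanding transfers, frees the coherent RX buffer,
 * unmaps the TX buffer and releases both channels. Does nothing if the port
 * has no DMA state attached.
 */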
void serial8250_release_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (!dma)
		return;

	/* Release RX resources */
	dmaengine_terminate_sync(dma->rxchan);
	dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
			  dma->rx_addr);
	dma_release_channel(dma->rxchan);
	dma->rxchan = NULL;

	/* Release TX resources */
	dmaengine_terminate_sync(dma->txchan);
	dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);
	dma_release_channel(dma->txchan);
	dma->txchan = NULL;
	dma->tx_running = 0;

	dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
}
EXPORT_SYMBOL_GPL(serial8250_release_dma);