// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW DMA core
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/irqreturn.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

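/*
 * DW_SPI_{RX,TX}_BUSY are bit numbers in dws->dma_chan_busy tracking which
 * DMA channels are still in flight. The *_BURST_LEVEL values are fallback
 * burst lengths used when the DMA engine doesn't report its max_burst
 * capability.
 */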
#define DW_SPI_RX_BUSY		0
#define DW_SPI_RX_BURST_LEVEL	16
#define DW_SPI_TX_BUSY		1
#define DW_SPI_TX_BURST_LEVEL	16

static bool dw_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

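/*
 * Illustrative arithmetic for the burst setup below (example numbers only):
 * with a 32-entry FIFO def_burst is 16, so a DMA engine reporting
 * caps.max_burst = 8 yields rxburst = txburst = 8, i.e. DMARDLR is
 * programmed with 7 and DMATDLR with 8.
 */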
static void dw_spi_dma_maxburst_init(struct dw_spi *dws)
{
	struct dma_slave_caps caps;
	u32 max_burst, def_burst;
	int ret;

	def_burst = dws->fifo_len / 2;

	ret = dma_get_slave_caps(dws->rxchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_RX_BURST_LEVEL;

	dws->rxburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMARDLR, dws->rxburst - 1);

	ret = dma_get_slave_caps(dws->txchan, &caps);
	if (!ret && caps.max_burst)
		max_burst = caps.max_burst;
	else
		max_burst = DW_SPI_TX_BURST_LEVEL;

	/*
	 * Having the Rx DMA channel serviced with higher priority than the Tx
	 * DMA channel might not be enough to provide a well balanced
	 * DMA-based SPI transfer interface. There might still be moments when
	 * the Tx DMA channel is occasionally handled faster than the Rx DMA
	 * channel. That in its turn will eventually cause an SPI Rx FIFO
	 * overflow if the SPI bus speed is high enough to fill the SPI Rx
	 * FIFO before it's cleared by the Rx DMA channel. In order to fix the
	 * problem the Tx DMA activity is intentionally slowed down by
	 * limiting the effective SPI Tx FIFO depth to a value twice the Tx
	 * burst length.
	 */
	dws->txburst = min(max_burst, def_burst);
	dw_writel(dws, DW_SPI_DMATDLR, dws->txburst);
}

static int dw_spi_dma_caps_init(struct dw_spi *dws)
{
	struct dma_slave_caps tx, rx;
	int ret;

	ret = dma_get_slave_caps(dws->txchan, &tx);
	if (ret)
		return ret;

	ret = dma_get_slave_caps(dws->rxchan, &rx);
	if (ret)
		return ret;

	if (!(tx.directions & BIT(DMA_MEM_TO_DEV) &&
	      rx.directions & BIT(DMA_DEV_TO_MEM)))
		return -ENXIO;

	if (tx.max_sg_burst > 0 && rx.max_sg_burst > 0)
		dws->dma_sg_burst = min(tx.max_sg_burst, rx.max_sg_burst);
	else if (tx.max_sg_burst > 0)
		dws->dma_sg_burst = tx.max_sg_burst;
	else if (rx.max_sg_burst > 0)
		dws->dma_sg_burst = rx.max_sg_burst;
	else
		dws->dma_sg_burst = 0;

	/*
	 * Both channels are assumed to belong to the same DMA controller,
	 * hence the peripheral-side address width capabilities are most
	 * likely the same.
	 */
	dws->dma_addr_widths = tx.dst_addr_widths & rx.src_addr_widths;

	return 0;
}

static int dw_spi_dma_init_mfld(struct device *dev, struct dw_spi *dws)
{
	struct dw_dma_slave dma_tx = { .dst_id = 1 }, *tx = &dma_tx;
	struct dw_dma_slave dma_rx = { .src_id = 0 }, *rx = &dma_rx;
	struct pci_dev *dma_dev;
	dma_cap_mask_t mask;
	int ret = -EBUSY;

	/*
	 * Get the PCI device of the DMA controller. Currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, dw_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, dw_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	pci_dev_put(dma_dev);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	pci_dev_put(dma_dev);
	return ret;
}

static int dw_spi_dma_init_generic(struct device *dev, struct dw_spi *dws)
{
	int ret;

	dws->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dws->rxchan)) {
		ret = PTR_ERR(dws->rxchan);
		dws->rxchan = NULL;
		goto err_exit;
	}

	dws->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dws->txchan)) {
		ret = PTR_ERR(dws->txchan);
		dws->txchan = NULL;
		goto free_rxchan;
	}

	dws->host->dma_rx = dws->rxchan;
	dws->host->dma_tx = dws->txchan;

	init_completion(&dws->dma_completion);

	ret = dw_spi_dma_caps_init(dws);
	if (ret)
		goto free_txchan;

	dw_spi_dma_maxburst_init(dws);

	return 0;

free_txchan:
	dma_release_channel(dws->txchan);
	dws->txchan = NULL;
free_rxchan:
	dma_release_channel(dws->rxchan);
	dws->rxchan = NULL;
err_exit:
	return ret;
}

static void dw_spi_dma_exit(struct dw_spi *dws)
{
	if (dws->txchan) {
		dmaengine_terminate_sync(dws->txchan);
		dma_release_channel(dws->txchan);
	}

	if (dws->rxchan) {
		dmaengine_terminate_sync(dws->rxchan);
		dma_release_channel(dws->rxchan);
	}
}

static irqreturn_t dw_spi_dma_transfer_handler(struct dw_spi *dws)
{
	dw_spi_check_status(dws, false);

	complete(&dws->dma_completion);

	return IRQ_HANDLED;
}

static enum dma_slave_buswidth dw_spi_dma_convert_width(u8 n_bytes)
{
	switch (n_bytes) {
	case 1:
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	case 2:
		return DMA_SLAVE_BUSWIDTH_2_BYTES;
	case 4:
		return DMA_SLAVE_BUSWIDTH_4_BYTES;
	default:
		return DMA_SLAVE_BUSWIDTH_UNDEFINED;
	}
}

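/*
 * DMA is only worth the setup cost for transfers that don't fit entirely
 * into the FIFO, and only when the DMA engine supports the peripheral bus
 * width implied by dws->n_bytes.
 */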
static bool dw_spi_can_dma(struct spi_controller *host,
			   struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(host);
	enum dma_slave_buswidth dma_bus_width;

	if (xfer->len <= dws->fifo_len)
		return false;

	dma_bus_width = dw_spi_dma_convert_width(dws->n_bytes);

	return dws->dma_addr_widths & BIT(dma_bus_width);
}

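/*
 * Worked example for the timeout computed below (example numbers only):
 * a 4096-byte transfer at 1 MHz takes 4096 * 8 / 1000000 s ~= 32 ms on the
 * wire; doubling that and adding 200 ms of slack arms a 264 ms completion
 * timeout.
 */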
static int dw_spi_dma_wait(struct dw_spi *dws, unsigned int len, u32 speed)
{
	unsigned long long ms;

	ms = len * MSEC_PER_SEC * BITS_PER_BYTE;
	do_div(ms, speed);
	ms += ms + 200;

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	ms = wait_for_completion_timeout(&dws->dma_completion,
					 msecs_to_jiffies(ms));

	if (ms == 0) {
		dev_err(&dws->host->cur_msg->spi->dev,
			"DMA transaction timed out\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static inline bool dw_spi_dma_tx_busy(struct dw_spi *dws)
{
	return !(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_EMPT);
}

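/*
 * The drain delay below is expressed in SCK cycles: the number of FIFO
 * entries left, times the bits per entry, is exactly how many serial clock
 * ticks the shift register still needs to push the remaining data out.
 */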
static int dw_spi_dma_wait_tx_done(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	u32 nents;

	nents = dw_readl(dws, DW_SPI_TXFLR);
	delay.unit = SPI_DELAY_UNIT_SCK;
	delay.value = nents * dws->n_bytes * BITS_PER_BYTE;

	while (dw_spi_dma_tx_busy(dws) && retry--)
		spi_delay_exec(&delay, xfer);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Tx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Tx channel
 * callback will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_tx(struct dw_spi *dws)
{
	struct dma_slave_config txconf;

	memset(&txconf, 0, sizeof(txconf));
	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = dws->txburst;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	txconf.device_fc = false;

	return dmaengine_slave_config(dws->txchan, &txconf);
}

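/*
 * For both submit helpers below: DMA_PREP_INTERRUPT requests a completion
 * callback for the descriptor and DMA_CTRL_ACK lets the DMA driver recycle
 * the descriptor once it completes; the callbacks clear the respective
 * busy bits.
 */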
static int dw_spi_dma_submit_tx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;
	int ret;

	txdesc = dmaengine_prep_slave_sg(dws->txchan, sgl, nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return -ENOMEM;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	cookie = dmaengine_submit(txdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->txchan);
		return ret;
	}

	set_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);

	return 0;
}

static inline bool dw_spi_dma_rx_busy(struct dw_spi *dws)
{
	return !!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_RF_NOT_EMPT);
}

static int dw_spi_dma_wait_rx_done(struct dw_spi *dws)
{
	int retry = DW_SPI_WAIT_RETRIES;
	struct spi_delay delay;
	unsigned long ns, us;
	u32 nents;

	/*
	 * It's unlikely that the DMA engine is still doing the data fetching,
	 * but if it is, let's give it some reasonable time. The timeout
	 * calculation is based on the synchronous APB/SSI reference clock
	 * rate, on the number of data entries left in the Rx FIFO, times the
	 * number of clock periods normally needed for a single APB read/write
	 * transaction without the PREADY signal utilized (which is true for
	 * the DW APB SSI controller).
	 */
	nents = dw_readl(dws, DW_SPI_RXFLR);
	ns = 4U * NSEC_PER_SEC / dws->max_freq * nents;
	if (ns <= NSEC_PER_USEC) {
		delay.unit = SPI_DELAY_UNIT_NSECS;
		delay.value = ns;
	} else {
		us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
		delay.unit = SPI_DELAY_UNIT_USECS;
		delay.value = clamp_val(us, 0, USHRT_MAX);
	}

	while (dw_spi_dma_rx_busy(dws) && retry--)
		spi_delay_exec(&delay, NULL);

	if (retry < 0) {
		dev_err(&dws->host->dev, "Rx hanged up\n");
		return -EIO;
	}

	return 0;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the Rx channel
 * callback will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy))
		return;

	complete(&dws->dma_completion);
}

static int dw_spi_dma_config_rx(struct dw_spi *dws)
{
	struct dma_slave_config rxconf;

	memset(&rxconf, 0, sizeof(rxconf));
	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = dws->rxburst;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = dw_spi_dma_convert_width(dws->n_bytes);
	rxconf.device_fc = false;

	return dmaengine_slave_config(dws->rxchan, &rxconf);
}

static int dw_spi_dma_submit_rx(struct dw_spi *dws, struct scatterlist *sgl,
				unsigned int nents)
{
	struct dma_async_tx_descriptor *rxdesc;
	dma_cookie_t cookie;
	int ret;

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan, sgl, nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return -ENOMEM;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	cookie = dmaengine_submit(rxdesc);
	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_terminate_sync(dws->rxchan);
		return ret;
	}

	set_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);

	return 0;
}

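/*
 * TDMAE is always enabled since every transfer drives the Tx side, while
 * RDMAE and the Rx interrupts are enabled only when an Rx buffer is
 * supplied. The unmasked overrun/underrun interrupts act as error signals;
 * normal completion is reported via the DMA callbacks.
 */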
static int dw_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 imr, dma_ctrl;
	int ret;

	if (!xfer->tx_buf)
		return -EINVAL;

	/* Setup DMA channels */
	ret = dw_spi_dma_config_tx(dws);
	if (ret)
		return ret;

	if (xfer->rx_buf) {
		ret = dw_spi_dma_config_rx(dws);
		if (ret)
			return ret;
	}

	/* Set the DMA handshaking interface */
	dma_ctrl = DW_SPI_DMACR_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= DW_SPI_DMACR_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	imr = DW_SPI_INT_TXOI;
	if (xfer->rx_buf)
		imr |= DW_SPI_INT_RXUI | DW_SPI_INT_RXOI;
	dw_spi_umask_intr(dws, imr);

	reinit_completion(&dws->dma_completion);

	dws->transfer_handler = dw_spi_dma_transfer_handler;

	return 0;
}

static int dw_spi_dma_transfer_all(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	int ret;

	/* Submit the DMA Tx transfer */
	ret = dw_spi_dma_submit_tx(dws, xfer->tx_sg.sgl, xfer->tx_sg.nents);
	if (ret)
		goto err_clear_dmac;

	/* Submit the DMA Rx transfer if required */
	if (xfer->rx_buf) {
		ret = dw_spi_dma_submit_rx(dws, xfer->rx_sg.sgl,
					   xfer->rx_sg.nents);
		if (ret)
			goto err_clear_dmac;

		/* Rx must be started before Tx: SPI clocks data in as Tx clocks it out */
		dma_async_issue_pending(dws->rxchan);
	}

	dma_async_issue_pending(dws->txchan);

	ret = dw_spi_dma_wait(dws, xfer->len, xfer->effective_speed_hz);

err_clear_dmac:
	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

/*
 * If at least one of the requested DMA channels doesn't support hardware
 * accelerated SG list traversal, the DMA driver will most likely work around
 * that by performing IRQ-based SG list entries resubmission. That can cause a
 * problem if the DMA Tx channel is recharged and re-executed before the Rx
 * DMA channel. Due to non-deterministic IRQ-handler execution latency the DMA
 * Tx channel will start pushing data to the SPI bus before the Rx DMA channel
 * is even reinitialized with the next inbound SG list entry. By doing so the
 * DMA Tx channel will implicitly start filling the DW APB SSI Rx FIFO up,
 * which, while the Rx DMA channel is being recharged and re-executed, will
 * eventually overflow.
 *
 * In order to solve the problem we have to feed the DMA engine with SG list
 * entries one-by-one. That keeps the DW APB SSI Tx and Rx FIFOs synchronized
 * and prevents the Rx FIFO overflow. Since in general the tx_sg and rx_sg
 * lists may have a different number of entries of different lengths (though
 * the total length should match), let's virtually split the SG lists into a
 * set of DMA transfers, each of whose length is the minimum of the ordered
 * SG-entry lengths. An ASCII sketch of the implemented algorithm follows:
 *                     xfer->len
 *                |___________|
 * tx_sg list:    |___|____|__|
 * rx_sg list:    |_|____|____|
 * DMA transfers: |_|_|__|_|__|
 *
 * Note in order to have this workaround solve the denoted problem the DMA
 * engine driver should properly initialize the max_sg_burst capability and
 * set the DMA device max segment size parameter with the maximum data block
 * size the DMA engine supports.
 */

static int dw_spi_dma_transfer_one(struct dw_spi *dws,
				   struct spi_transfer *xfer)
{
	struct scatterlist *tx_sg = NULL, *rx_sg = NULL, tx_tmp, rx_tmp;
	unsigned int tx_len = 0, rx_len = 0;
	unsigned int base, len;
	int ret;

	sg_init_table(&tx_tmp, 1);
	sg_init_table(&rx_tmp, 1);

	for (base = 0; base < xfer->len; base += len) {
		/* Fetch next Tx DMA data chunk */
		if (!tx_len) {
			tx_sg = !tx_sg ? &xfer->tx_sg.sgl[0] : sg_next(tx_sg);
			sg_dma_address(&tx_tmp) = sg_dma_address(tx_sg);
			tx_len = sg_dma_len(tx_sg);
		}

		/* Fetch next Rx DMA data chunk */
		if (!rx_len) {
			rx_sg = !rx_sg ? &xfer->rx_sg.sgl[0] : sg_next(rx_sg);
			sg_dma_address(&rx_tmp) = sg_dma_address(rx_sg);
			rx_len = sg_dma_len(rx_sg);
		}

		len = min(tx_len, rx_len);

		sg_dma_len(&tx_tmp) = len;
		sg_dma_len(&rx_tmp) = len;

		/* Submit DMA Tx transfer */
		ret = dw_spi_dma_submit_tx(dws, &tx_tmp, 1);
		if (ret)
			break;

		/* Submit DMA Rx transfer */
		ret = dw_spi_dma_submit_rx(dws, &rx_tmp, 1);
		if (ret)
			break;

		/* Rx must be started before Tx: SPI clocks data in as Tx clocks it out */
		dma_async_issue_pending(dws->rxchan);

		dma_async_issue_pending(dws->txchan);

		/*
		 * Here we only need to wait for the DMA transfer to be
		 * finished since the SPI controller is kept enabled during
		 * the procedure this loop implements and there is no risk of
		 * losing data left in the Tx/Rx FIFOs.
		 */
		ret = dw_spi_dma_wait(dws, len, xfer->effective_speed_hz);
		if (ret)
			break;

		reinit_completion(&dws->dma_completion);

		sg_dma_address(&tx_tmp) += len;
		sg_dma_address(&rx_tmp) += len;
		tx_len -= len;
		rx_len -= len;
	}

	dw_writel(dws, DW_SPI_DMACR, 0);

	return ret;
}

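/*
 * Example of the split heuristic below (example numbers only): with
 * dma_sg_burst = 4, a duplex transfer whose longer SG list has 8 entries
 * goes through dw_spi_dma_transfer_one(), while a 3-entry list or any
 * Tx-only transfer is submitted at once via dw_spi_dma_transfer_all().
 */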
static int dw_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	unsigned int nents;
	int ret;

	nents = max(xfer->tx_sg.nents, xfer->rx_sg.nents);

	/*
	 * Execute the normal DMA-based transfer (which submits the Rx and Tx
	 * SG lists directly to the DMA engine at once) if either hardware
	 * accelerated SG list traversal is supported by both channels, or a
	 * Tx-only SPI transfer is requested, or the DMA engine is capable of
	 * handling both SG lists in hardware.
	 */
	if (!dws->dma_sg_burst || !xfer->rx_buf || nents <= dws->dma_sg_burst)
		ret = dw_spi_dma_transfer_all(dws, xfer);
	else
		ret = dw_spi_dma_transfer_one(dws, xfer);
	if (ret)
		return ret;

	if (dws->host->cur_msg->status == -EINPROGRESS) {
		ret = dw_spi_dma_wait_tx_done(dws, xfer);
		if (ret)
			return ret;
	}

	if (xfer->rx_buf && dws->host->cur_msg->status == -EINPROGRESS)
		ret = dw_spi_dma_wait_rx_done(dws);

	return ret;
}

static void dw_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(DW_SPI_TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(DW_SPI_RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops dw_spi_dma_mfld_ops = {
	.dma_init = dw_spi_dma_init_mfld,
	.dma_exit = dw_spi_dma_exit,
	.dma_setup = dw_spi_dma_setup,
	.can_dma = dw_spi_can_dma,
	.dma_transfer = dw_spi_dma_transfer,
	.dma_stop = dw_spi_dma_stop,
};

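/*
 * Usage sketch (assumed caller, based on how the exported helpers are
 * typically wired up): the PCI or platform glue calls dw_spi_dma_setup_mfld()
 * or dw_spi_dma_setup_generic() to populate dws->dma_ops before registering
 * the controller, and the DW SPI core then invokes these ops around each
 * transfer.
 */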
void dw_spi_dma_setup_mfld(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_mfld_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_mfld, SPI_DW_CORE);

static const struct dw_spi_dma_ops dw_spi_dma_generic_ops = {
	.dma_init = dw_spi_dma_init_generic,
	.dma_exit = dw_spi_dma_exit,
	.dma_setup = dw_spi_dma_setup,
	.can_dma = dw_spi_can_dma,
	.dma_transfer = dw_spi_dma_transfer,
	.dma_stop = dw_spi_dma_stop,
};

void dw_spi_dma_setup_generic(struct dw_spi *dws)
{
	dws->dma_ops = &dw_spi_dma_generic_ops;
}
EXPORT_SYMBOL_NS_GPL(dw_spi_dma_setup_generic, SPI_DW_CORE);