drivers/spi/spi-dw-mid.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1

static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };

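/*
 * dmaengine channel filter: accept only channels belonging to the DMA
 * controller looked up in mid_spi_dma_init(), and hand the dw_dma_slave
 * parameters to the DMA driver via chan->private.
 */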
static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

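/*
 * Look up the PCI function of the Medfield DMA controller (8086:0827),
 * request one RX and one TX slave channel from it, and advertise the
 * channels to the SPI core via master->dma_rx/dma_tx.
 */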
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device of the DMA controller; currently it can
	 * only be the DMA controller of Medfield.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;
	dws->master->dma_rx = dws->rxchan;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;
	dws->master->dma_tx = dws->txchan;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

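/*
 * Tear down DMA: synchronously terminate any in-flight descriptors on
 * both channels and release them back to the dmaengine core.
 */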
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_sync(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_sync(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

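/*
 * Interrupt handler used while a DMA transfer is in flight.  The only
 * sources unmasked in mid_spi_dma_setup() are the FIFO overrun/underrun
 * errors, so any pending status here is fatal: clear the interrupts,
 * reset the chip and fail the current message with -EIO.
 */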
static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}

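/* Use DMA only when the transfer does not fit into the FIFO. */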
static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (!dws->dma_inited)
		return false;

	return xfer->len > dws->fifo_len;
}

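/* Map the number of bytes per FIFO word to a dmaengine bus width. */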
static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the
 * callback for the TX channel clears the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

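/*
 * Configure the TX channel for memory-to-device transfers and wrap the
 * transfer's tx_sg scatterlist in a slave_sg descriptor.  Returns NULL
 * when the transfer has no TX buffer or descriptor allocation fails.
 */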
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the DMA transfer starts; the
 * callback for the RX channel clears the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

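/*
 * Mirror of dw_spi_dma_prepare_tx() for the RX direction: configure the
 * RX channel for device-to-memory transfers and build a slave_sg
 * descriptor over the transfer's rx_sg scatterlist.
 */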
static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

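/*
 * Program the controller's DMA request thresholds, enable the DMA
 * handshake only for the directions this transfer uses, and unmask the
 * FIFO error interrupts handled by dma_transfer().
 */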
static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 dma_ctrl = 0;

	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Set the interrupt mask */
	spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);

	dws->transfer_handler = dma_transfer;

	return 0;
}

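/*
 * Submit the prepared descriptors and mark the corresponding channels
 * busy; completion is signalled from the channel callbacks.
 */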
static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * RX must be started before TX: SPI is full duplex, so data is
	 * clocked into the RX FIFO as soon as TX data goes out.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

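/*
 * Synchronously terminate whichever channels are still marked busy and
 * clear their busy bits.
 */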
static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init = mid_spi_dma_init,
	.dma_exit = mid_spi_dma_exit,
	.dma_setup = mid_spi_dma_setup,
	.can_dma = mid_spi_can_dma,
	.dma_transfer = mid_spi_dma_transfer,
	.dma_stop = mid_spi_dma_stop,
};
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, one 32-bit reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

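/*
 * Read this bus's clock divider from the MRST Clock Control Unit to
 * derive the maximum SPI frequency (100 MHz / (cdiv + 1)), then hook up
 * the Medfield DMA parameters and operations when DMA support is built in.
 */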
int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}