Commit | Line | Data |
---|---|---|
a568231f LL |
1 | /* |
2 | * Copyright (c) 2015 MediaTek Inc. | |
3 | * Author: Leilk Liu <leilk.liu@mediatek.com> | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | */ | |
14 | ||
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/string.h>
26 | ||
/* Controller register map (byte offsets from mdata->base). */
#define SPI_CFG0_REG                      0x0000
#define SPI_CFG1_REG                      0x0004
#define SPI_TX_SRC_REG                    0x0008
#define SPI_RX_DST_REG                    0x000c
#define SPI_TX_DATA_REG                   0x0010
#define SPI_RX_DATA_REG                   0x0014
#define SPI_CMD_REG                       0x0018
#define SPI_STATUS0_REG                   0x001c
#define SPI_PAD_SEL_REG                   0x0024

/* SPI_CFG0_REG: clock and chip-select timing fields, 8 bits each. */
#define SPI_CFG0_SCK_HIGH_OFFSET          0
#define SPI_CFG0_SCK_LOW_OFFSET           8
#define SPI_CFG0_CS_HOLD_OFFSET           16
#define SPI_CFG0_CS_SETUP_OFFSET          24

/* SPI_CFG1_REG: CS idle time, packet loop count and packet length. */
#define SPI_CFG1_CS_IDLE_OFFSET           0
#define SPI_CFG1_PACKET_LOOP_OFFSET       8
#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
#define SPI_CFG1_GET_TICK_DLY_OFFSET      30

#define SPI_CFG1_CS_IDLE_MASK             0xff
#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000

/* SPI_CMD_REG: bit positions used when shifting computed values in. */
#define SPI_CMD_ACT_OFFSET                0
#define SPI_CMD_RESUME_OFFSET             1
#define SPI_CMD_CPHA_OFFSET               8
#define SPI_CMD_CPOL_OFFSET               9
#define SPI_CMD_TXMSBF_OFFSET             12
#define SPI_CMD_RXMSBF_OFFSET             13
#define SPI_CMD_RX_ENDIAN_OFFSET          14
#define SPI_CMD_TX_ENDIAN_OFFSET          15

/* SPI_CMD_REG: single-bit masks (several mirror the offsets above). */
#define SPI_CMD_RST                       BIT(2)
#define SPI_CMD_PAUSE_EN                  BIT(4)
#define SPI_CMD_DEASSERT                  BIT(5)
#define SPI_CMD_CPHA                      BIT(8)
#define SPI_CMD_CPOL                      BIT(9)
#define SPI_CMD_RX_DMA                    BIT(10)
#define SPI_CMD_TX_DMA                    BIT(11)
#define SPI_CMD_TXMSBF                    BIT(12)
#define SPI_CMD_RXMSBF                    BIT(13)
#define SPI_CMD_RX_ENDIAN                 BIT(14)
#define SPI_CMD_TX_ENDIAN                 BIT(15)
#define SPI_CMD_FINISH_IE                 BIT(16)
#define SPI_CMD_PAUSE_IE                  BIT(17)

/* Quirk flags carried in struct mtk_spi_compatible. */
#define MTK_SPI_QUIRK_PAD_SELECT 1
/* Must explicitly send dummy Tx bytes to do Rx only transfer */
#define MTK_SPI_QUIRK_MUST_TX 1

/* Highest valid value of the "mediatek,pad-select" DT property. */
#define MT8173_SPI_MAX_PAD_SEL 3

/* Values of mdata->state: idle, or paused between cs_change transfers. */
#define MTK_SPI_IDLE 0
#define MTK_SPI_PAUSED 1

/* Hardware FIFO depth (bytes) and maximum packet length (bytes). */
#define MTK_SPI_MAX_FIFO_SIZE 32
#define MTK_SPI_PACKET_SIZE 1024
/* Per-SoC capability/quirk description selected via the OF match table. */
struct mtk_spi_compatible {
	u32 need_pad_sel;	/* nonzero: program SPI_PAD_SEL_REG at config time */
	u32 must_tx;		/* nonzero: dummy TX bytes required for RX-only xfers */
};
90 | ||
/* Driver state, allocated together with the spi_master. */
struct mtk_spi {
	void __iomem *base;			/* mapped controller registers */
	u32 state;				/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	u32 pad_sel;				/* "mediatek,pad-select" DT value */
	struct clk *spi_clk, *parent_clk;	/* bus clock and its parent */
	struct spi_transfer *cur_transfer;	/* transfer currently in flight */
	u32 xfer_len;				/* bytes programmed for this chunk */
	struct scatterlist *tx_sgl, *rx_sgl;	/* current DMA scatterlist entries */
	u32 tx_sgl_len, rx_sgl_len;		/* bytes remaining in current entries */
	const struct mtk_spi_compatible *dev_comp;	/* per-SoC quirks */
};
102 | ||
/* mt6589: no pad select needed, RX-only works without dummy TX. */
static const struct mtk_spi_compatible mt6589_compat = {
	.need_pad_sel = 0,
	.must_tx = 0,
};

/* mt8135: same capabilities as mt6589. */
static const struct mtk_spi_compatible mt8135_compat = {
	.need_pad_sel = 0,
	.must_tx = 0,
};

/* mt8173: needs pad select and dummy TX bytes for RX-only transfers. */
static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = MTK_SPI_QUIRK_PAD_SELECT,
	.must_tx = MTK_SPI_QUIRK_MUST_TX,
};
117 | ||
118 | /* | |
119 | * A piece of default chip info unless the platform | |
120 | * supplies it. | |
121 | */ | |
122 | static const struct mtk_chip_config mtk_default_chip_info = { | |
123 | .rx_mlsb = 1, | |
124 | .tx_mlsb = 1, | |
125 | .tx_endian = 0, | |
126 | .rx_endian = 0, | |
127 | }; | |
128 | ||
/* Device-tree match table; .data points at the per-SoC quirk struct. */
static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
136 | ||
137 | static void mtk_spi_reset(struct mtk_spi *mdata) | |
138 | { | |
139 | u32 reg_val; | |
140 | ||
141 | /* set the software reset bit in SPI_CMD_REG. */ | |
142 | reg_val = readl(mdata->base + SPI_CMD_REG); | |
143 | reg_val |= SPI_CMD_RST; | |
144 | writel(reg_val, mdata->base + SPI_CMD_REG); | |
145 | ||
146 | reg_val = readl(mdata->base + SPI_CMD_REG); | |
147 | reg_val &= ~SPI_CMD_RST; | |
148 | writel(reg_val, mdata->base + SPI_CMD_REG); | |
149 | } | |
150 | ||
151 | static void mtk_spi_config(struct mtk_spi *mdata, | |
152 | struct mtk_chip_config *chip_config) | |
153 | { | |
154 | u32 reg_val; | |
155 | ||
156 | reg_val = readl(mdata->base + SPI_CMD_REG); | |
157 | ||
158 | /* set the mlsbx and mlsbtx */ | |
159 | reg_val &= ~(SPI_CMD_TXMSBF | SPI_CMD_RXMSBF); | |
160 | reg_val |= (chip_config->tx_mlsb << SPI_CMD_TXMSBF_OFFSET); | |
161 | reg_val |= (chip_config->rx_mlsb << SPI_CMD_RXMSBF_OFFSET); | |
162 | ||
163 | /* set the tx/rx endian */ | |
164 | reg_val &= ~(SPI_CMD_TX_ENDIAN | SPI_CMD_RX_ENDIAN); | |
165 | reg_val |= (chip_config->tx_endian << SPI_CMD_TX_ENDIAN_OFFSET); | |
166 | reg_val |= (chip_config->rx_endian << SPI_CMD_RX_ENDIAN_OFFSET); | |
167 | ||
168 | /* set finish and pause interrupt always enable */ | |
169 | reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_EN; | |
170 | ||
171 | /* disable dma mode */ | |
172 | reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA); | |
173 | ||
174 | /* disable deassert mode */ | |
175 | reg_val &= ~SPI_CMD_DEASSERT; | |
176 | ||
177 | writel(reg_val, mdata->base + SPI_CMD_REG); | |
178 | ||
179 | /* pad select */ | |
180 | if (mdata->dev_comp->need_pad_sel) | |
181 | writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG); | |
182 | } | |
183 | ||
184 | static int mtk_spi_prepare_hardware(struct spi_master *master) | |
185 | { | |
186 | struct spi_transfer *trans; | |
187 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
188 | struct spi_message *msg = master->cur_msg; | |
189 | int ret; | |
190 | ||
191 | ret = clk_prepare_enable(mdata->spi_clk); | |
192 | if (ret < 0) { | |
193 | dev_err(&master->dev, "failed to enable clock (%d)\n", ret); | |
194 | return ret; | |
195 | } | |
196 | ||
197 | trans = list_first_entry(&msg->transfers, struct spi_transfer, | |
198 | transfer_list); | |
199 | if (trans->cs_change == 0) { | |
200 | mdata->state = MTK_SPI_IDLE; | |
201 | mtk_spi_reset(mdata); | |
202 | } | |
203 | ||
204 | return ret; | |
205 | } | |
206 | ||
207 | static int mtk_spi_unprepare_hardware(struct spi_master *master) | |
208 | { | |
209 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
210 | ||
211 | clk_disable_unprepare(mdata->spi_clk); | |
212 | ||
213 | return 0; | |
214 | } | |
215 | ||
216 | static int mtk_spi_prepare_message(struct spi_master *master, | |
217 | struct spi_message *msg) | |
218 | { | |
219 | u32 reg_val; | |
220 | u8 cpha, cpol; | |
221 | struct mtk_chip_config *chip_config; | |
222 | struct spi_device *spi = msg->spi; | |
223 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
224 | ||
225 | cpha = spi->mode & SPI_CPHA ? 1 : 0; | |
226 | cpol = spi->mode & SPI_CPOL ? 1 : 0; | |
227 | ||
228 | reg_val = readl(mdata->base + SPI_CMD_REG); | |
229 | reg_val &= ~(SPI_CMD_CPHA | SPI_CMD_CPOL); | |
230 | reg_val |= (cpha << SPI_CMD_CPHA_OFFSET); | |
231 | reg_val |= (cpol << SPI_CMD_CPOL_OFFSET); | |
232 | writel(reg_val, mdata->base + SPI_CMD_REG); | |
233 | ||
234 | chip_config = spi->controller_data; | |
235 | if (!chip_config) { | |
236 | chip_config = (void *)&mtk_default_chip_info; | |
237 | spi->controller_data = chip_config; | |
238 | } | |
239 | mtk_spi_config(mdata, chip_config); | |
240 | ||
241 | return 0; | |
242 | } | |
243 | ||
244 | static void mtk_spi_set_cs(struct spi_device *spi, bool enable) | |
245 | { | |
246 | u32 reg_val; | |
247 | struct mtk_spi *mdata = spi_master_get_devdata(spi->master); | |
248 | ||
249 | reg_val = readl(mdata->base + SPI_CMD_REG); | |
250 | if (!enable) | |
251 | reg_val |= SPI_CMD_PAUSE_EN; | |
252 | else | |
253 | reg_val &= ~SPI_CMD_PAUSE_EN; | |
254 | writel(reg_val, mdata->base + SPI_CMD_REG); | |
255 | } | |
256 | ||
257 | static void mtk_spi_prepare_transfer(struct spi_master *master, | |
258 | struct spi_transfer *xfer) | |
259 | { | |
260 | u32 spi_clk_hz, div, high_time, low_time, holdtime, | |
261 | setuptime, cs_idletime, reg_val = 0; | |
262 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
263 | ||
264 | spi_clk_hz = clk_get_rate(mdata->spi_clk); | |
265 | if (xfer->speed_hz < spi_clk_hz / 2) | |
266 | div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz); | |
267 | else | |
268 | div = 1; | |
269 | ||
270 | high_time = (div + 1) / 2; | |
271 | low_time = (div + 1) / 2; | |
272 | holdtime = (div + 1) / 2 * 2; | |
273 | setuptime = (div + 1) / 2 * 2; | |
274 | cs_idletime = (div + 1) / 2 * 2; | |
275 | ||
276 | reg_val |= (((high_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET); | |
277 | reg_val |= (((low_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET); | |
278 | reg_val |= (((holdtime - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET); | |
279 | reg_val |= (((setuptime - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET); | |
280 | writel(reg_val, mdata->base + SPI_CFG0_REG); | |
281 | ||
282 | reg_val = readl(mdata->base + SPI_CFG1_REG); | |
283 | reg_val &= ~SPI_CFG1_CS_IDLE_MASK; | |
284 | reg_val |= (((cs_idletime - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET); | |
285 | writel(reg_val, mdata->base + SPI_CFG1_REG); | |
286 | } | |
287 | ||
288 | static void mtk_spi_setup_packet(struct spi_master *master) | |
289 | { | |
290 | u32 packet_size, packet_loop, reg_val; | |
291 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
292 | ||
293 | packet_size = min_t(unsigned, mdata->xfer_len, MTK_SPI_PACKET_SIZE); | |
294 | packet_loop = mdata->xfer_len / packet_size; | |
295 | ||
296 | reg_val = readl(mdata->base + SPI_CFG1_REG); | |
297 | reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK + SPI_CFG1_PACKET_LOOP_MASK); | |
298 | reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET; | |
299 | reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET; | |
300 | writel(reg_val, mdata->base + SPI_CFG1_REG); | |
301 | } | |
302 | ||
303 | static void mtk_spi_enable_transfer(struct spi_master *master) | |
304 | { | |
305 | int cmd; | |
306 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
307 | ||
308 | cmd = readl(mdata->base + SPI_CMD_REG); | |
309 | if (mdata->state == MTK_SPI_IDLE) | |
310 | cmd |= 1 << SPI_CMD_ACT_OFFSET; | |
311 | else | |
312 | cmd |= 1 << SPI_CMD_RESUME_OFFSET; | |
313 | writel(cmd, mdata->base + SPI_CMD_REG); | |
314 | } | |
315 | ||
316 | static int mtk_spi_get_mult_delta(int xfer_len) | |
317 | { | |
318 | int mult_delta; | |
319 | ||
320 | if (xfer_len > MTK_SPI_PACKET_SIZE) | |
321 | mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; | |
322 | else | |
323 | mult_delta = 0; | |
324 | ||
325 | return mult_delta; | |
326 | } | |
327 | ||
328 | static void mtk_spi_update_mdata_len(struct spi_master *master) | |
329 | { | |
330 | int mult_delta; | |
331 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
332 | ||
333 | if (mdata->tx_sgl_len && mdata->rx_sgl_len) { | |
334 | if (mdata->tx_sgl_len > mdata->rx_sgl_len) { | |
335 | mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); | |
336 | mdata->xfer_len = mdata->rx_sgl_len - mult_delta; | |
337 | mdata->rx_sgl_len = mult_delta; | |
338 | mdata->tx_sgl_len -= mdata->xfer_len; | |
339 | } else { | |
340 | mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); | |
341 | mdata->xfer_len = mdata->tx_sgl_len - mult_delta; | |
342 | mdata->tx_sgl_len = mult_delta; | |
343 | mdata->rx_sgl_len -= mdata->xfer_len; | |
344 | } | |
345 | } else if (mdata->tx_sgl_len) { | |
346 | mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); | |
347 | mdata->xfer_len = mdata->tx_sgl_len - mult_delta; | |
348 | mdata->tx_sgl_len = mult_delta; | |
349 | } else if (mdata->rx_sgl_len) { | |
350 | mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); | |
351 | mdata->xfer_len = mdata->rx_sgl_len - mult_delta; | |
352 | mdata->rx_sgl_len = mult_delta; | |
353 | } | |
354 | } | |
355 | ||
/*
 * Program the DMA source/destination address registers for the current
 * chunk.  The registers are little-endian 32-bit, hence the cpu_to_le32
 * conversion (with __force to silence sparse on the writel argument).
 */
static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl)
		writel((__force u32)cpu_to_le32(xfer->tx_dma),
		       mdata->base + SPI_TX_SRC_REG);
	if (mdata->rx_sgl)
		writel((__force u32)cpu_to_le32(xfer->rx_dma),
		       mdata->base + SPI_RX_DST_REG);
}
368 | ||
369 | static int mtk_spi_fifo_transfer(struct spi_master *master, | |
370 | struct spi_device *spi, | |
371 | struct spi_transfer *xfer) | |
372 | { | |
373 | int cnt, i; | |
374 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
375 | ||
376 | mdata->cur_transfer = xfer; | |
377 | mdata->xfer_len = xfer->len; | |
378 | mtk_spi_prepare_transfer(master, xfer); | |
379 | mtk_spi_setup_packet(master); | |
380 | ||
381 | if (xfer->len % 4) | |
382 | cnt = xfer->len / 4 + 1; | |
383 | else | |
384 | cnt = xfer->len / 4; | |
385 | ||
386 | for (i = 0; i < cnt; i++) | |
387 | writel(*((u32 *)xfer->tx_buf + i), | |
388 | mdata->base + SPI_TX_DATA_REG); | |
389 | ||
390 | mtk_spi_enable_transfer(master); | |
391 | ||
392 | return 1; | |
393 | } | |
394 | ||
/*
 * DMA path: enable the DMA direction bits, latch the first TX/RX
 * scatterlist entries, and program/start the first chunk.  Subsequent
 * chunks are driven from mtk_spi_interrupt().
 *
 * Returns 1 to tell the SPI core the transfer completes asynchronously.
 */
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	/* reset per-transfer DMA bookkeeping */
	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;

	mtk_spi_prepare_transfer(master, xfer);

	/* turn on DMA for whichever directions this transfer uses */
	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	/* stash the bus addresses; the IRQ handler advances them */
	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}
438 | ||
439 | static int mtk_spi_transfer_one(struct spi_master *master, | |
440 | struct spi_device *spi, | |
441 | struct spi_transfer *xfer) | |
442 | { | |
443 | if (master->can_dma(master, spi, xfer)) | |
444 | return mtk_spi_dma_transfer(master, spi, xfer); | |
445 | else | |
446 | return mtk_spi_fifo_transfer(master, spi, xfer); | |
447 | } | |
448 | ||
/* Use DMA whenever the transfer exceeds the 32-byte hardware FIFO. */
static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
}
455 | ||
/*
 * Transfer-complete interrupt handler.
 *
 * FIFO (PIO) transfers: drain the RX FIFO into the buffer and finish.
 * DMA transfers: account for the chunk that just completed, walk the
 * TX/RX scatterlists, and reprogram the controller for the next chunk
 * until both lists are exhausted.
 */
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, i;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	/* NOTE(review): bit 1 of SPI_STATUS0_REG appears to flag a
	 * pause-mode completion -- confirm against the datasheet. */
	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & 0x2)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		/* The transfer length need not be a multiple of 4, but
		 * SPI_RX_DATA_REG always yields 4 bytes per read, so
		 * unpack the RX FIFO into the buffer byte by byte.
		 */
		if (trans->rx_buf) {
			for (i = 0; i < mdata->xfer_len; i++) {
				if (i % 4 == 0)
					reg_val =
					readl(mdata->base + SPI_RX_DATA_REG);
				*((u8 *)(trans->rx_buf + i)) =
					(reg_val >> ((i % 4) * 8)) & 0xff;
			}
		}
		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	/* DMA path: advance past the chunk that just completed */
	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	/* move to the next scatterlist entry once one is drained */
	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	/* program and kick the next chunk */
	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}
525 | ||
526 | static int mtk_spi_probe(struct platform_device *pdev) | |
527 | { | |
528 | struct spi_master *master; | |
529 | struct mtk_spi *mdata; | |
530 | const struct of_device_id *of_id; | |
531 | struct resource *res; | |
532 | int irq, ret; | |
533 | ||
534 | master = spi_alloc_master(&pdev->dev, sizeof(*mdata)); | |
535 | if (!master) { | |
536 | dev_err(&pdev->dev, "failed to alloc spi master\n"); | |
537 | return -ENOMEM; | |
538 | } | |
539 | ||
540 | master->auto_runtime_pm = true; | |
541 | master->dev.of_node = pdev->dev.of_node; | |
542 | master->mode_bits = SPI_CPOL | SPI_CPHA; | |
543 | ||
544 | master->set_cs = mtk_spi_set_cs; | |
545 | master->prepare_transfer_hardware = mtk_spi_prepare_hardware; | |
546 | master->unprepare_transfer_hardware = mtk_spi_unprepare_hardware; | |
547 | master->prepare_message = mtk_spi_prepare_message; | |
548 | master->transfer_one = mtk_spi_transfer_one; | |
549 | master->can_dma = mtk_spi_can_dma; | |
550 | ||
551 | of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node); | |
552 | if (!of_id) { | |
553 | dev_err(&pdev->dev, "failed to probe of_node\n"); | |
554 | ret = -EINVAL; | |
555 | goto err_put_master; | |
556 | } | |
557 | ||
558 | mdata = spi_master_get_devdata(master); | |
559 | mdata->dev_comp = of_id->data; | |
560 | if (mdata->dev_comp->must_tx) | |
561 | master->flags = SPI_MASTER_MUST_TX; | |
562 | ||
563 | if (mdata->dev_comp->need_pad_sel) { | |
564 | ret = of_property_read_u32(pdev->dev.of_node, | |
565 | "mediatek,pad-select", | |
566 | &mdata->pad_sel); | |
567 | if (ret) { | |
568 | dev_err(&pdev->dev, "failed to read pad select: %d\n", | |
569 | ret); | |
570 | goto err_put_master; | |
571 | } | |
572 | ||
573 | if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) { | |
574 | dev_err(&pdev->dev, "wrong pad-select: %u\n", | |
575 | mdata->pad_sel); | |
576 | ret = -EINVAL; | |
577 | goto err_put_master; | |
578 | } | |
579 | } | |
580 | ||
581 | platform_set_drvdata(pdev, master); | |
582 | ||
583 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
584 | if (!res) { | |
585 | ret = -ENODEV; | |
586 | dev_err(&pdev->dev, "failed to determine base address\n"); | |
587 | goto err_put_master; | |
588 | } | |
589 | ||
590 | mdata->base = devm_ioremap_resource(&pdev->dev, res); | |
591 | if (IS_ERR(mdata->base)) { | |
592 | ret = PTR_ERR(mdata->base); | |
593 | goto err_put_master; | |
594 | } | |
595 | ||
596 | irq = platform_get_irq(pdev, 0); | |
597 | if (irq < 0) { | |
598 | dev_err(&pdev->dev, "failed to get irq (%d)\n", irq); | |
599 | ret = irq; | |
600 | goto err_put_master; | |
601 | } | |
602 | ||
603 | if (!pdev->dev.dma_mask) | |
604 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | |
605 | ||
606 | ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt, | |
607 | IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master); | |
608 | if (ret) { | |
609 | dev_err(&pdev->dev, "failed to register irq (%d)\n", ret); | |
610 | goto err_put_master; | |
611 | } | |
612 | ||
613 | mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk"); | |
614 | if (IS_ERR(mdata->spi_clk)) { | |
615 | ret = PTR_ERR(mdata->spi_clk); | |
616 | dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret); | |
617 | goto err_put_master; | |
618 | } | |
619 | ||
620 | mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk"); | |
621 | if (IS_ERR(mdata->parent_clk)) { | |
622 | ret = PTR_ERR(mdata->parent_clk); | |
623 | dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret); | |
624 | goto err_put_master; | |
625 | } | |
626 | ||
627 | ret = clk_prepare_enable(mdata->spi_clk); | |
628 | if (ret < 0) { | |
629 | dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret); | |
630 | goto err_put_master; | |
631 | } | |
632 | ||
633 | ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk); | |
634 | if (ret < 0) { | |
635 | dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret); | |
636 | goto err_disable_clk; | |
637 | } | |
638 | ||
639 | clk_disable_unprepare(mdata->spi_clk); | |
640 | ||
641 | pm_runtime_enable(&pdev->dev); | |
642 | ||
643 | ret = devm_spi_register_master(&pdev->dev, master); | |
644 | if (ret) { | |
645 | dev_err(&pdev->dev, "failed to register master (%d)\n", ret); | |
646 | goto err_put_master; | |
647 | } | |
648 | ||
649 | return 0; | |
650 | ||
651 | err_disable_clk: | |
652 | clk_disable_unprepare(mdata->spi_clk); | |
653 | err_put_master: | |
654 | spi_master_put(master); | |
655 | ||
656 | return ret; | |
657 | } | |
658 | ||
/* Unbind: disable runtime PM, quiesce the controller, drop our master ref. */
static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);
	/* NOTE(review): probe leaves spi_clk unprepared, and it is only
	 * enabled while a message runs or while runtime-resumed; this
	 * unconditional unprepare may unbalance the clock refcount when
	 * the device is already suspended -- verify. */
	clk_disable_unprepare(mdata->spi_clk);
	spi_master_put(master);

	return 0;
}
672 | ||
673 | #ifdef CONFIG_PM_SLEEP | |
674 | static int mtk_spi_suspend(struct device *dev) | |
675 | { | |
676 | int ret; | |
677 | struct spi_master *master = dev_get_drvdata(dev); | |
678 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
679 | ||
680 | ret = spi_master_suspend(master); | |
681 | if (ret) | |
682 | return ret; | |
683 | ||
684 | if (!pm_runtime_suspended(dev)) | |
685 | clk_disable_unprepare(mdata->spi_clk); | |
686 | ||
687 | return ret; | |
688 | } | |
689 | ||
690 | static int mtk_spi_resume(struct device *dev) | |
691 | { | |
692 | int ret; | |
693 | struct spi_master *master = dev_get_drvdata(dev); | |
694 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
695 | ||
696 | if (!pm_runtime_suspended(dev)) { | |
697 | ret = clk_prepare_enable(mdata->spi_clk); | |
698 | if (ret < 0) | |
699 | return ret; | |
700 | } | |
701 | ||
702 | ret = spi_master_resume(master); | |
703 | if (ret < 0) | |
704 | clk_disable_unprepare(mdata->spi_clk); | |
705 | ||
706 | return ret; | |
707 | } | |
708 | #endif /* CONFIG_PM_SLEEP */ | |
709 | ||
710 | #ifdef CONFIG_PM | |
711 | static int mtk_spi_runtime_suspend(struct device *dev) | |
712 | { | |
713 | struct spi_master *master = dev_get_drvdata(dev); | |
714 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
715 | ||
716 | clk_disable_unprepare(mdata->spi_clk); | |
717 | ||
718 | return 0; | |
719 | } | |
720 | ||
721 | static int mtk_spi_runtime_resume(struct device *dev) | |
722 | { | |
723 | struct spi_master *master = dev_get_drvdata(dev); | |
724 | struct mtk_spi *mdata = spi_master_get_devdata(master); | |
725 | ||
726 | return clk_prepare_enable(mdata->spi_clk); | |
727 | } | |
728 | #endif /* CONFIG_PM */ | |
729 | ||
/* System-sleep and runtime PM hooks for the controller device. */
static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};
735 | ||
4299aaaa | 736 | static struct platform_driver mtk_spi_driver = { |
a568231f LL |
737 | .driver = { |
738 | .name = "mtk-spi", | |
739 | .pm = &mtk_spi_pm, | |
740 | .of_match_table = mtk_spi_of_match, | |
741 | }, | |
742 | .probe = mtk_spi_probe, | |
743 | .remove = mtk_spi_remove, | |
744 | }; | |
745 | ||
/* Standard module registration and metadata. */
module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");