// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>
#include <linux/pm_qos.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

#define PIN_MODE_CFG(x)			((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel: Enable pad (pins) selection in SPI controller
 * @must_tx: Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing: Enable adjusting cfg register to enhance time accuracy
 * @dma_ext: DMA address extension supported
 * @no_need_unprepare: Don't unprepare the SPI clk during runtime
 * @ipm_design: Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base: Start address of the SPI controller registers
 * @state: SPI controller state
 * @pad_num: Number of pad_sel entries
 * @pad_sel: Groups of pins to select
 * @parent_clk: Parent of sel_clk
 * @sel_clk: SPI host mux clock
 * @spi_clk: Peripheral clock
 * @spi_hclk: AHB bus clock
 * @cur_transfer: Currently processed SPI transfer
 * @xfer_len: Number of bytes to transfer
 * @num_xfered: Number of transferred bytes
 * @tx_sgl: TX transfer scatterlist
 * @rx_sgl: RX transfer scatterlist
 * @tx_sgl_len: Size of TX DMA transfer
 * @rx_sgl_len: Size of RX DMA transfer
 * @dev_comp: Device data structure
 * @qos_request: QoS request
 * @spi_clk_hz: Current SPI clock in Hz
 * @spimem_done: SPI-MEM operation completion
 * @use_spimem: Enables SPI-MEM
 * @dev: Device pointer
 * @tx_dma: DMA start for SPI-MEM TX
 * @rx_dma: DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	struct pm_qos_request qos_request;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * Default chip info, used unless the platform supplies its own.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
	  .data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
	  .data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
	  .data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
	  .data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
	  .data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
	  .data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
	  .data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
	  .data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
	  .data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
	  .data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
	  .data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
	  .data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

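/*
 * A minimal, illustrative devicetree node for this controller. The unit
 * address, interrupt and clock phandles below are hypothetical and
 * board-specific; the clock-names must be "parent-clk", "sel-clk" and
 * "spi-clk" (plus an optional "hclk"), matching what mtk_spi_probe()
 * requests:
 *
 *	spi@1100a000 {
 *		compatible = "mediatek,mt8173-spi";
 *		reg = <0x1100a000 0x1000>;
 *		interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
 *		clocks = <&topckgen CLK_TOP_SYSPLL3_D2>,
 *			 <&topckgen CLK_TOP_SPI_SEL>,
 *			 <&pericfg CLK_PERI_SPI0>;
 *		clock-names = "parent-clk", "sel-clk", "spi-clk";
 *		mediatek,pad-select = <0>;
 *	};
 */
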
static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

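/*
 * Program the chip-select timing. The ns delays from the SPI core are
 * converted to SPI clock ticks as ns * DIV_ROUND_UP(spi_clk_hz, 1MHz) / 1000,
 * then clamped to the register field width and written as (ticks - 1).
 */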
static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					    << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					    << SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_controller *host,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, 500);
	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set the TX/RX MSB-first bits (TXMSBF/RXMSBF) */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi_get_chipselect(spi, 0)],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_controller *host,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(host, msg->spi);
}

static int mtk_spi_unprepare_message(struct spi_controller *host,
				     struct spi_message *message)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cpu_latency_qos_update_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);
	return 0;
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

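/*
 * Program the SCK high/low half-periods for the requested speed. The
 * controller divides spi_clk by `div`; each half-period is (div + 1) / 2
 * ticks, so the resulting SCK rate is approximately spi_clk_hz / div.
 */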
static void mtk_spi_prepare_transfer(struct spi_controller *host,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			    << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			    << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

static void mtk_spi_setup_packet(struct spi_controller *host)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_controller *host)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

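/*
 * The hardware repeats a packet of at most MTK_SPI_PACKET_SIZE bytes
 * (MTK_SPI_IPM_PACKET_SIZE on IPM designs) packet_loop times, so one
 * round can only cover a whole multiple of the packet size. Return the
 * remainder that must be carried over into the next round.
 */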
static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
{
	u32 mult_delta = 0;

	if (mdata->dev_comp->ipm_design) {
		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
	} else {
		if (xfer_len > MTK_SPI_PACKET_SIZE)
			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	}

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_controller *host)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_controller *host,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

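/*
 * FIFO path: the transfer is started from the FIFO; anything beyond the
 * first MTK_SPI_MAX_FIFO_SIZE (32) bytes is continued in further
 * FIFO-sized chunks from the threaded interrupt handler.
 */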
static int mtk_spi_fifo_transfer(struct spi_controller *host,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(host, xfer->speed_hz);
	mtk_spi_setup_packet(host);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(host);

	return 1;
}

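/*
 * DMA path: point the controller at the mapped TX/RX scatterlists and
 * start the first chunk; subsequent scatterlist entries are programmed
 * from the threaded interrupt handler.
 */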
static int mtk_spi_dma_transfer(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(host, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_controller *host,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (host->can_dma(host, spi, xfer))
		return mtk_spi_dma_transfer(host, spi, xfer);
	else
		return mtk_spi_fifo_transfer(host, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_controller *host,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(spi->controller);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi_get_csgpiod(spi, 0))
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi_get_csgpiod(spi, 0), 0);

	return 0;
}

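/*
 * Threaded half of the transfer interrupt. For FIFO transfers, drain
 * the RX FIFO, refill the TX FIFO and restart until the whole transfer
 * is done. For DMA transfers, advance the TX/RX scatterlists and
 * program the next chunk, finalizing once both lists are exhausted.
 */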
static irqreturn_t mtk_spi_interrupt_thread(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	struct spi_transfer *xfer = mdata->cur_transfer;

	if (!host->can_dma(host, NULL, xfer)) {
		if (xfer->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     xfer->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
				       &reg_val,
				       remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == xfer->len) {
			spi_finalize_current_transfer(host);
			return IRQ_HANDLED;
		}

		len = xfer->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(host);

		if (xfer->tx_buf) {
			cnt = mdata->xfer_len / 4;
			iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				      xfer->tx_buf + mdata->num_xfered, cnt);

			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = 0;
				memcpy(&reg_val,
				       xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
				       remainder);
				writel(reg_val, mdata->base + SPI_TX_DATA_REG);
			}
		}

		mtk_spi_enable_transfer(host);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		xfer->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		xfer->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(host);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(host);
	mtk_spi_setup_packet(host);
	mtk_spi_setup_dma_addr(host, xfer);
	mtk_spi_enable_transfer(host);

	return IRQ_HANDLED;
}

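/*
 * Hard interrupt handler: latch whether the controller paused or went
 * idle, complete SPI-MEM operations directly, and defer the FIFO/DMA
 * bookkeeping to the threaded handler.
 */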
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *host = dev_id;
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	u32 reg_val;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	return IRQ_WAKE_THREAD;
}

static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_controller *host,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	/*
	 * Each byte takes 8 cycles of the SPI clock. The speed is given
	 * in Hz and the timeout is wanted in milliseconds, hence the
	 * factor of 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a zero timeout for short transfers */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* 1s tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

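/*
 * Execute a SPI-MEM operation: build a DMA-able bounce buffer holding
 * the opcode, address and dummy bytes (plus TX data for writes), bounce
 * unaligned RX buffers, then run the whole op as a single half-duplex
 * DMA transfer and wait for the completion interrupt.
 */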
static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_controller_get_devdata(mem->spi->controller);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->controller, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->controller, op->max_freq);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->controller);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->controller, op);

	mtk_spi_enable_transfer(mem->spi->controller);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static const struct spi_controller_mem_caps mtk_spi_mem_caps = {
	.per_op_freq = true,
};

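/*
 * Probe: the controller is fed from a mux ("sel-clk") parented to
 * "parent-clk" and gated by "spi-clk", with an optional AHB "hclk".
 * The clocks are enabled once to latch the SPI clock rate, then handed
 * over to runtime PM.
 */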
static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	host = devm_spi_alloc_host(dev, sizeof(*mdata));
	if (!host)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi host\n");

	host->auto_runtime_pm = true;
	host->dev.of_node = dev->of_node;
	host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	host->set_cs = mtk_spi_set_cs;
	host->prepare_message = mtk_spi_prepare_message;
	host->unprepare_message = mtk_spi_unprepare_message;
	host->transfer_one = mtk_spi_transfer_one;
	host->can_dma = mtk_spi_can_dma;
	host->setup = mtk_spi_setup;
	host->set_cs_timing = mtk_spi_set_hw_cs_timing;
	host->use_gpio_descriptors = true;

	mdata = spi_controller_get_devdata(host);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		host->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		host->flags = SPI_CONTROLLER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		host->mode_bits |= SPI_LOOP | SPI_RX_DUAL | SPI_TX_DUAL |
				   SPI_RX_QUAD | SPI_TX_QUAD;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		host->mem_ops = &mtk_spi_mem_ops;
		host->mem_caps = &mtk_spi_mem_caps;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, host);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	if (mdata->dev_comp->ipm_design)
		dma_set_max_seg_size(dev, SZ_16M);
	else
		dma_set_max_seg_size(dev, SZ_256K);

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	cpu_latency_qos_add_request(&mdata->qos_request, PM_QOS_DEFAULT_VALUE);

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != host->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, host->num_chipselect);

		if (!host->cs_gpiods && host->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	ret = devm_request_threaded_irq(dev, irq, mtk_spi_interrupt,
					mtk_spi_interrupt_thread,
					IRQF_TRIGGER_NONE, dev_name(dev), host);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	pm_runtime_enable(dev);

	ret = devm_spi_register_controller(dev, host);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register host\n");
	}

	return 0;
}

static void mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *host = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	cpu_latency_qos_remove_request(&mdata->qos_request);
	if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
		complete(&mdata->spimem_done);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "Failed to resume hardware (%pe)\n", ERR_PTR(ret));
	} else {
		/*
		 * If pm runtime resume failed, clks are disabled and
		 * unprepared. So don't access the hardware and skip clk
		 * unpreparing.
		 */
		mtk_spi_reset(mdata);

		if (mdata->dev_comp->no_need_unprepare) {
			clk_unprepare(mdata->spi_clk);
			clk_unprepare(mdata->spi_hclk);
		}
	}

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	ret = spi_controller_suspend(host);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	pinctrl_pm_select_default_state(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_controller_resume(host);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *host = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_controller_get_devdata(host);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");