// SPDX-License-Identifier: GPL-2.0
//
// Mediatek SPI NOR controller driver
//
// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/string.h>

#define DRIVER_NAME "mtk-spi-nor"

#define MTK_NOR_REG_CMD 0x00
#define MTK_NOR_CMD_WRITE BIT(4)
#define MTK_NOR_CMD_PROGRAM BIT(2)
#define MTK_NOR_CMD_READ BIT(0)
#define MTK_NOR_CMD_MASK GENMASK(5, 0)

#define MTK_NOR_REG_PRG_CNT 0x04
#define MTK_NOR_PRG_CNT_MAX 56
#define MTK_NOR_REG_RDATA 0x0c

#define MTK_NOR_REG_RADR0 0x10
#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
#define MTK_NOR_REG_RADR3 0xc8

#define MTK_NOR_REG_WDATA 0x1c

#define MTK_NOR_REG_PRGDATA0 0x20
#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
#define MTK_NOR_REG_PRGDATA_MAX 5

#define MTK_NOR_REG_SHIFT0 0x38
#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
#define MTK_NOR_REG_SHIFT_MAX 9

#define MTK_NOR_REG_CFG1 0x60
#define MTK_NOR_FAST_READ BIT(0)

#define MTK_NOR_REG_CFG2 0x64
#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
#define MTK_NOR_WR_BUF_EN BIT(0)

#define MTK_NOR_REG_PP_DATA 0x98

#define MTK_NOR_REG_IRQ_STAT 0xa8
#define MTK_NOR_REG_IRQ_EN 0xac
#define MTK_NOR_IRQ_DMA BIT(7)
#define MTK_NOR_IRQ_MASK GENMASK(7, 0)

#define MTK_NOR_REG_CFG3 0xb4
#define MTK_NOR_DISABLE_WREN BIT(7)
#define MTK_NOR_DISABLE_SR_POLL BIT(5)

#define MTK_NOR_REG_WP 0xc4
#define MTK_NOR_ENABLE_SF_CMD 0x30

#define MTK_NOR_REG_BUSCFG 0xcc
#define MTK_NOR_4B_ADDR BIT(4)
#define MTK_NOR_QUAD_ADDR BIT(3)
#define MTK_NOR_QUAD_READ BIT(2)
#define MTK_NOR_DUAL_ADDR BIT(1)
#define MTK_NOR_DUAL_READ BIT(0)
#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)

#define MTK_NOR_REG_DMA_CTL 0x718
#define MTK_NOR_DMA_START BIT(0)

#define MTK_NOR_REG_DMA_FADR 0x71c
#define MTK_NOR_REG_DMA_DADR 0x720
#define MTK_NOR_REG_DMA_END_DADR 0x724
#define MTK_NOR_REG_DMA_DADR_HB 0x738
#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c

#define MTK_NOR_PRG_MAX_SIZE 6
// DMA read src/dst addresses have to be 16-byte aligned,
#define MTK_NOR_DMA_ALIGN 16
#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
// and we allocate a bounce buffer if the destination address isn't aligned.
#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE

// Buffered page program can do one 128-byte transfer
#define MTK_NOR_PP_SIZE 128

#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)

struct mtk_nor {
	struct spi_controller *ctlr;
	struct device *dev;
	void __iomem *base;
	u8 *buffer;
	dma_addr_t buffer_dma;
	struct clk *spi_clk;
	struct clk *ctlr_clk;
	unsigned int spi_freq;
	bool wbuf_en;
	bool has_irq;
	bool high_dma;
	struct completion op_done;
};

static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
{
	u32 val = readl(sp->base + reg);

	val &= ~clr;
	val |= set;
	writel(val, sp->base + reg);
}

static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
{
	ulong delay = CLK_TO_US(sp, clk);
	u32 reg;
	int ret;

	writel(cmd, sp->base + MTK_NOR_REG_CMD);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
				 delay / 3, (delay + 1) * 200);
	if (ret < 0)
		dev_err(sp->dev, "command %u timeout.\n", cmd);
	return ret;
}

static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 addr = op->addr.val;
	int i;

	for (i = 0; i < 3; i++) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
		addr >>= 8;
	}
	if (op->addr.nbytes == 4) {
		writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
	} else {
		mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
	}
}

static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK);
}

static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
	int dummy = 0;

	if (op->dummy.buswidth)
		dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;

	if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
		if (op->addr.buswidth == 1)
			return dummy == 8;
		else if (op->addr.buswidth == 2)
			return dummy == 4;
		else if (op->addr.buswidth == 4)
			return dummy == 6;
	} else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
		if (op->cmd.opcode == 0x03)
			return dummy == 0;
		else if (op->cmd.opcode == 0x0b)
			return dummy == 8;
	}
	return false;
}
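
// Check whether an op can be executed with the "prg" shift registers:
// tx bytes are shifted out of the PRGDATA registers and rx bytes shifted
// back into the SHIFT registers in one command, so the whole op has to
// fit within MTK_NOR_PRG_CNT_MAX bits.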
static bool mtk_nor_match_prg(const struct spi_mem_op *op)
{
	int tx_len, rx_len, prg_len, prg_left;

	// prg mode is spi-only.
	if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) ||
	    (op->dummy.buswidth > 1) || (op->data.buswidth > 1))
		return false;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		// count dummy bytes only if we need to write data after them
		tx_len += op->dummy.nbytes;

		// leave at least one byte for data
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX)
			return false;

		// if there's no addr, meaning adjust_op_size is impossible,
		// check data length as well.
		if ((!op->addr.nbytes) &&
		    (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1))
			return false;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1)
			return false;

		rx_len = op->data.nbytes;
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (rx_len > prg_left) {
			if (!op->addr.nbytes)
				return false;
			rx_len = prg_left;
		}

		prg_len = tx_len + op->dummy.nbytes + rx_len;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	} else {
		prg_len = tx_len + op->dummy.nbytes;
		if (prg_len > MTK_NOR_PRG_CNT_MAX / 8)
			return false;
	}
	return true;
}

static void mtk_nor_adj_prg_size(struct spi_mem_op *op)
{
	int tx_len, tx_left, prg_left;

	tx_len = op->cmd.nbytes + op->addr.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT) {
		tx_len += op->dummy.nbytes;
		tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len;
		if (op->data.nbytes > tx_left)
			op->data.nbytes = tx_left;
	} else if (op->data.dir == SPI_MEM_DATA_IN) {
		prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes;
		if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1)
			prg_left = MTK_NOR_REG_SHIFT_MAX + 1;
		if (op->data.nbytes > prg_left)
			op->data.nbytes = prg_left;
	}
}
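
// spi-mem calls adjust_op_size() before exec_op() so that op->data.nbytes
// can be clamped to what a single controller operation is able to transfer.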
static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);

	if (!op->data.nbytes)
		return 0;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		if ((op->data.dir == SPI_MEM_DATA_IN) &&
		    mtk_nor_match_read(op)) {
			// limit size to prevent timeout calculation overflow
			if (op->data.nbytes > 0x400000)
				op->data.nbytes = 0x400000;

			if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
			    (op->data.nbytes < MTK_NOR_DMA_ALIGN))
				op->data.nbytes = 1;
			else if (!need_bounce(sp, op))
				op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
			else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
				op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
			return 0;
		} else if (op->data.dir == SPI_MEM_DATA_OUT) {
			if (op->data.nbytes >= MTK_NOR_PP_SIZE)
				op->data.nbytes = MTK_NOR_PP_SIZE;
			else
				op->data.nbytes = 1;
			return 0;
		}
	}

	mtk_nor_adj_prg_size(op);
	return 0;
}

static bool mtk_nor_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->cmd.buswidth != 1)
		return false;

	if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
		switch (op->data.dir) {
		case SPI_MEM_DATA_IN:
			if (mtk_nor_match_read(op))
				return true;
			break;
		case SPI_MEM_DATA_OUT:
			if ((op->addr.buswidth == 1) &&
			    (op->dummy.nbytes == 0) &&
			    (op->data.buswidth == 1))
				return true;
			break;
		default:
			break;
		}
	}

	return mtk_nor_match_prg(op);
}
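
// Program the read opcode and the address/data bus widths used by the
// hardware read path (CFG1 fast-read bit and BUSCFG dual/quad bits).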
static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u32 reg = 0;

	if (op->addr.nbytes == 4)
		reg |= MTK_NOR_4B_ADDR;

	if (op->data.buswidth == 4) {
		reg |= MTK_NOR_QUAD_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
		if (op->addr.buswidth == 4)
			reg |= MTK_NOR_QUAD_ADDR;
	} else if (op->data.buswidth == 2) {
		reg |= MTK_NOR_DUAL_READ;
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
		if (op->addr.buswidth == 2)
			reg |= MTK_NOR_DUAL_ADDR;
	}
	if (op->cmd.opcode == 0x0b)
		mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
	else
		mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);

	mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
}
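
// Issue a DMA read: program the flash source address and the DMA
// destination window, kick MTK_NOR_DMA_START, then wait for the DMA
// interrupt when available or fall back to polling the control register.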
static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length,
			    dma_addr_t dma_addr)
{
	int ret = 0;
	ulong delay;
	u32 reg;

	writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
	writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
	writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);

	if (sp->high_dma) {
		writel(upper_32_bits(dma_addr),
		       sp->base + MTK_NOR_REG_DMA_DADR_HB);
		writel(upper_32_bits(dma_addr + length),
		       sp->base + MTK_NOR_REG_DMA_END_DADR_HB);
	}

	if (sp->has_irq) {
		reinit_completion(&sp->op_done);
		mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
	}

	mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);

	delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);

	if (sp->has_irq) {
		if (!wait_for_completion_timeout(&sp->op_done,
						 (delay + 1) * 100))
			ret = -ETIMEDOUT;
	} else {
		ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
					 !(reg & MTK_NOR_DMA_START), delay / 3,
					 (delay + 1) * 100);
	}

	if (ret < 0)
		dev_err(sp->dev, "dma read timeout.\n");

	return ret;
}

// DMA into the pre-allocated, aligned bounce buffer and copy out to the
// (unaligned) destination afterwards.
static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	unsigned int rdlen;
	int ret;

	if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK)
		rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
	else
		rdlen = op->data.nbytes;

	ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma);
	if (!ret)
		memcpy(op->data.buf.in, sp->buffer, op->data.nbytes);

	return ret;
}

static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int ret;
	dma_addr_t dma_addr;

	if (need_bounce(sp, op))
		return mtk_nor_read_bounce(sp, op);

	dma_addr = dma_map_single(sp->dev, op->data.buf.in,
				  op->data.nbytes, DMA_FROM_DEVICE);
	if (dma_mapping_error(sp->dev, dma_addr))
		return -EINVAL;

	ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr);

	dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE);

	return ret;
}

static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	u8 *buf = op->data.buf.in;
	int ret;

	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
	if (!ret)
		buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
	return ret;
}

static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 val & MTK_NOR_WR_BUF_EN, 0, 10000);
	if (!ret)
		sp->wbuf_en = true;
	return ret;
}

static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
{
	int ret;
	u32 val;

	if (!sp->wbuf_en)
		return 0;

	val = readl(sp->base + MTK_NOR_REG_CFG2);
	writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
	ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
				 !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
	if (!ret)
		sp->wbuf_en = false;
	return ret;
}

static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	u32 val;
	int ret, i;

	ret = mtk_nor_write_buffer_enable(sp);
	if (ret < 0)
		return ret;

	for (i = 0; i < op->data.nbytes; i += 4) {
		val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
		      buf[i];
		writel(val, sp->base + MTK_NOR_REG_PP_DATA);
	}
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
				(op->data.nbytes + 5) * BITS_PER_BYTE);
}

static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
				 const struct spi_mem_op *op)
{
	const u8 *buf = op->data.buf.out;
	int ret;

	ret = mtk_nor_write_buffer_disable(sp);
	if (ret < 0)
		return ret;
	writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
	return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
}
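
// Execute an arbitrary op through the shift registers: tx bytes are laid
// out from PRGDATA_MAX downwards, the transfer is triggered with
// MTK_NOR_CMD_PROGRAM, and rx bytes are read back from the SHIFT registers.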
static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op)
{
	int rx_len = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	int tx_len, prg_len;
	int i, ret;
	void __iomem *reg;
	u8 bufbyte;

	tx_len = op->cmd.nbytes + op->addr.nbytes;

	// count dummy bytes only if we need to write data after them
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_len += op->dummy.nbytes + op->data.nbytes;
	else if (op->data.dir == SPI_MEM_DATA_IN)
		rx_len = op->data.nbytes;

	prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes +
		  op->data.nbytes;

	// an invalid op may reach here if the caller calls exec_op without
	// adjust_op_size. return -EINVAL instead of -ENOTSUPP so that
	// spi-mem won't try this op again with generic spi transfers.
	if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) ||
	    (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) ||
	    (prg_len > MTK_NOR_PRG_CNT_MAX / 8))
		return -EINVAL;

	// fill tx data
	for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	for (i = op->addr.nbytes; i > 0; i--, reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff;
		writeb(bufbyte, reg);
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(0, reg);
		}

		for (i = 0; i < op->data.nbytes; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			writeb(((const u8 *)(op->data.buf.out))[i], reg);
		}
	}

	for (; reg_offset >= 0; reg_offset--) {
		reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
		writeb(0, reg);
	}

	// trigger op
	writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
	ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
			       prg_len * BITS_PER_BYTE);
	if (ret)
		return ret;

	// fetch read data
	reg_offset = 0;
	if (op->data.dir == SPI_MEM_DATA_IN) {
		for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			((u8 *)(op->data.buf.in))[i] = readb(reg);
		}
	}

	return 0;
}

static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
	int ret;

	if ((op->data.nbytes == 0) ||
	    ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
		return mtk_nor_spi_mem_prg(sp, op);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		mtk_nor_set_addr(sp, op);
		writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
		if (op->data.nbytes == MTK_NOR_PP_SIZE)
			return mtk_nor_pp_buffered(sp, op);
		return mtk_nor_pp_unbuffered(sp, op);
	}

	if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
		ret = mtk_nor_write_buffer_disable(sp);
		if (ret < 0)
			return ret;
		mtk_nor_setup_bus(sp, op);
		if (op->data.nbytes == 1) {
			mtk_nor_set_addr(sp, op);
			return mtk_nor_read_pio(sp, op);
		} else {
			return mtk_nor_read_dma(sp, op);
		}
	}

	return mtk_nor_spi_mem_prg(sp, op);
}

static int mtk_nor_setup(struct spi_device *spi)
{
	struct mtk_nor *sp = spi_controller_get_devdata(spi->master);

	if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
		dev_err(&spi->dev, "spi clock should be %u Hz.\n",
			sp->spi_freq);
		return -EINVAL;
	}
	spi->max_speed_hz = sp->spi_freq;

	return 0;
}
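
// Plain SPI messages (limited to MTK_NOR_PRG_MAX_SIZE bytes, see
// mtk_max_msg_size()) are also executed through the PRGDATA/SHIFT registers.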
static int mtk_nor_transfer_one_message(struct spi_controller *master,
					struct spi_message *m)
{
	struct mtk_nor *sp = spi_controller_get_devdata(master);
	struct spi_transfer *t = NULL;
	unsigned long trx_len = 0;
	int stat = 0;
	int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
	void __iomem *reg;
	const u8 *txbuf;
	u8 *rxbuf;
	int i;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		txbuf = t->tx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
			if (txbuf)
				writeb(txbuf[i], reg);
			else
				writeb(0, reg);
		}
		trx_len += t->len;
	}

	writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);

	stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
				trx_len * BITS_PER_BYTE);
	if (stat < 0)
		goto msg_done;

	reg_offset = trx_len - 1;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		rxbuf = t->rx_buf;
		for (i = 0; i < t->len; i++, reg_offset--) {
			reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
			if (rxbuf)
				rxbuf[i] = readb(reg);
		}
	}

	m->actual_length = trx_len;
msg_done:
	m->status = stat;
	spi_finalize_current_message(master);

	return 0;
}

static void mtk_nor_disable_clk(struct mtk_nor *sp)
{
	clk_disable_unprepare(sp->spi_clk);
	clk_disable_unprepare(sp->ctlr_clk);
}

static int mtk_nor_enable_clk(struct mtk_nor *sp)
{
	int ret;

	ret = clk_prepare_enable(sp->spi_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sp->ctlr_clk);
	if (ret) {
		clk_disable_unprepare(sp->spi_clk);
		return ret;
	}

	return 0;
}

static void mtk_nor_init(struct mtk_nor *sp)
{
	writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT);

	writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
	mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
		    MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
}

static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
{
	struct mtk_nor *sp = data;
	u32 irq_status, irq_enabled;

	irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
	irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
	// write status back to clear interrupt
	writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);

	if (!(irq_status & irq_enabled))
		return IRQ_NONE;

	if (irq_status & MTK_NOR_IRQ_DMA) {
		complete(&sp->op_done);
		writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
	}

	return IRQ_HANDLED;
}

static size_t mtk_max_msg_size(struct spi_device *spi)
{
	return MTK_NOR_PRG_MAX_SIZE;
}

static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
	.adjust_op_size = mtk_nor_adjust_op_size,
	.supports_op = mtk_nor_supports_op,
	.exec_op = mtk_nor_exec_op
};

static const struct of_device_id mtk_nor_match[] = {
	{ .compatible = "mediatek,mt8192-nor", .data = (void *)36 },
	{ .compatible = "mediatek,mt8173-nor", .data = (void *)32 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_nor_match);

static int mtk_nor_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct mtk_nor *sp;
	void __iomem *base;
	struct clk *spi_clk, *ctlr_clk;
	int ret, irq;
	unsigned long dma_bits;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	spi_clk = devm_clk_get(&pdev->dev, "spi");
	if (IS_ERR(spi_clk))
		return PTR_ERR(spi_clk);

	ctlr_clk = devm_clk_get(&pdev->dev, "sf");
	if (IS_ERR(ctlr_clk))
		return PTR_ERR(ctlr_clk);

	dma_bits = (unsigned long)of_device_get_match_data(&pdev->dev);
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits))) {
		dev_err(&pdev->dev, "failed to set dma mask(%lu)\n", dma_bits);
		return -EINVAL;
	}

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
	if (!ctlr) {
		dev_err(&pdev->dev, "failed to allocate spi controller\n");
		return -ENOMEM;
	}

	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->max_message_size = mtk_max_msg_size;
	ctlr->mem_ops = &mtk_nor_mem_ops;
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->num_chipselect = 1;
	ctlr->setup = mtk_nor_setup;
	ctlr->transfer_one_message = mtk_nor_transfer_one_message;
	ctlr->auto_runtime_pm = true;

	dev_set_drvdata(&pdev->dev, ctlr);

	sp = spi_controller_get_devdata(ctlr);
	sp->base = base;
	sp->has_irq = false;
	sp->wbuf_en = false;
	sp->ctlr = ctlr;
	sp->dev = &pdev->dev;
	sp->spi_clk = spi_clk;
	sp->ctlr_clk = ctlr_clk;
	sp->high_dma = (dma_bits > 32);
	sp->buffer = dmam_alloc_coherent(&pdev->dev,
				MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
				&sp->buffer_dma, GFP_KERNEL);
	if (!sp->buffer)
		return -ENOMEM;

	if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) {
		dev_err(sp->dev, "misaligned allocation of internal buffer.\n");
		return -ENOMEM;
	}

	ret = mtk_nor_enable_clk(sp);
	if (ret < 0)
		return ret;

	sp->spi_freq = clk_get_rate(sp->spi_clk);

	mtk_nor_init(sp);

	irq = platform_get_irq_optional(pdev, 0);

	if (irq < 0) {
		dev_warn(sp->dev, "IRQ not available.");
	} else {
		ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
				       pdev->name, sp);
		if (ret < 0) {
			dev_warn(sp->dev, "failed to request IRQ.");
		} else {
			init_completion(&sp->op_done);
			sp->has_irq = true;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret < 0)
		goto err_probe;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);

	return 0;

err_probe:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return ret;
}

static int mtk_nor_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	mtk_nor_disable_clk(sp);

	return 0;
}

static int __maybe_unused mtk_nor_runtime_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct mtk_nor *sp = spi_controller_get_devdata(ctlr);

	return mtk_nor_enable_clk(sp);
}

static int __maybe_unused mtk_nor_suspend(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused mtk_nor_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}

static const struct dev_pm_ops mtk_nor_pm_ops = {
	SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend,
			   mtk_nor_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume)
};

static struct platform_driver mtk_nor_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = mtk_nor_match,
		.pm = &mtk_nor_pm_ops,
	},
	.probe = mtk_nor_probe,
	.remove = mtk_nor_remove,
};
919 MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
920 MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
921 MODULE_LICENSE("GPL v2");
922 MODULE_ALIAS("platform:" DRIVER_NAME);