// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
//
// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
//
// Copyright (C) 2020 MediaTek Inc.
// Author: Weijie Gao <weijie.gao@mediatek.com>
//
// This controller organizes the page data as several interleaved sectors
// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
// +---------+------+------+---------+------+------+-----+
// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
// +---------+------+------+---------+------+------+-----+
// With auto-format turned on, DMA only returns this part:
// +---------+---------+-----+
// | Sector1 | Sector2 | ... |
// +---------+---------+-----+
// The FDM data will be filled into the registers, and the ECC parity data
// isn't accessible.
// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
// auto-format is off.
//
// However, the Linux SPI-NAND driver expects the data returned as:
// +------+-----+
// | Page | OOB |
// +------+-----+
// where the page data is stored contiguously instead of interleaved.
// So we assume all instructions matching the page_op template between ECC
// prepare_io_req and finish_io_req are for page cache r/w.
// Here's how this spi-mem driver operates when reading:
// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
// 2. Perform page ops and let the controller fill the DMA bounce buffer with
//    de-interleaved sector data and set the FDM registers.
// 3. Return the data as:
//    +---------+---------+-----+------+------+-----+
//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
//    +---------+---------+-----+------+------+-----+
// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
//    read the data with auto-format off into the bounce buffer and copy the
//    needed data to the buffer specified in the request.
//
// Write requests operate in a similar manner.
// As a limitation of this strategy, we won't be able to access any ECC parity
// data at all in Linux.
//
// Here's the bad block mark situation on MTK chips:
// In older chips like mt7622, MTK uses the first FDM byte in the first sector
// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
// in the returned data, which is the BBM position expected by the kernel.
// However, the conventional bad block mark is the first byte of the OOB, which
// is part of the last sector data in the interleaved layout. Instead of fixing
// their hardware, MTK decided to address this inconsistency in software. On
// these later chips, the BootROM expects the following:
// 1. The [pagesize] byte on a nand page is used as the BBM, which will appear
//    at (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
// 2. The original byte stored at that position in the DMA buffer will be
//    stored as the first byte of the FDM section in the last sector.
// We can't disagree with the BootROM, so after de-interleaving, we need to
// perform the following swaps in read:
// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to
//    [page_size], which is the BBM position expected by the kernel.
// 2. Store the page data byte at [page_size + (nsectors - 1) * fdm_size] back
//    to [page_size - (nsectors - 1) * spare_size].
// Similarly, when writing, we need to perform swaps in the other direction.
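//
// As a concrete example (the numbers are illustrative, not tied to any one
// chip): for a 2048-byte page made of four 512-byte sectors with
// spare_size = 16 and fdm_size = 8, the BBM is read at
// 2048 - 3 * 16 = 2000 in the DMA buffer and moved to [2048], while the
// page data byte at 2048 + 3 * 8 = 2072 (the first FDM byte of the last
// sector) is moved back to [2000].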

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/nand.h>

// NFI registers
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM_7622 GENMASK(28, 24)
#define NFI_NAND_FSM_7986 GENMASK(29, 23)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val) & SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n) * 8)
#define NFI_FDMM(n) (NFI_FDM0M + (n) * 8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
#define NFI_MASTERSTA_MASK_7986 3

// SNFI registers
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_MAX_DUMMY 0xf
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

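// Timeout for register polling and DMA completion, in microseconds (1 s).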
#define SNFI_POLL_INTERVAL 1000000

static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };

static const u8 mt7986_spare_sizes[] = {
        16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67,
        74
};

struct mtk_snand_caps {
        u16 sector_size;
        u16 max_sectors;
        u16 fdm_size;
        u16 fdm_ecc_size;
        u16 fifo_size;

        bool bbm_swap;
        bool empty_page_check;
        u32 mastersta_mask;
        u32 nandfsm_mask;

        const u8 *spare_sizes;
        u32 num_spare_size;
};

static const struct mtk_snand_caps mt7622_snand_caps = {
        .sector_size = 512,
        .max_sectors = 8,
        .fdm_size = 8,
        .fdm_ecc_size = 1,
        .fifo_size = 32,
        .bbm_swap = false,
        .empty_page_check = false,
        .mastersta_mask = NFI_MASTERSTA_MASK_7622,
        .nandfsm_mask = NFI_NAND_FSM_7622,
        .spare_sizes = mt7622_spare_sizes,
        .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7629_snand_caps = {
        .sector_size = 512,
        .max_sectors = 8,
        .fdm_size = 8,
        .fdm_ecc_size = 1,
        .fifo_size = 32,
        .bbm_swap = true,
        .empty_page_check = false,
        .mastersta_mask = NFI_MASTERSTA_MASK_7622,
        .nandfsm_mask = NFI_NAND_FSM_7622,
        .spare_sizes = mt7622_spare_sizes,
        .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7986_snand_caps = {
        .sector_size = 1024,
        .max_sectors = 8,
        .fdm_size = 8,
        .fdm_ecc_size = 1,
        .fifo_size = 64,
        .bbm_swap = true,
        .empty_page_check = true,
        .mastersta_mask = NFI_MASTERSTA_MASK_7986,
        .nandfsm_mask = NFI_NAND_FSM_7986,
        .spare_sizes = mt7986_spare_sizes,
        .num_spare_size = ARRAY_SIZE(mt7986_spare_sizes)
};

struct mtk_snand_conf {
        size_t page_size;
        size_t oob_size;
        u8 nsectors;
        u8 spare_size;
};

struct mtk_snand {
        struct spi_controller *ctlr;
        struct device *dev;
        struct clk *nfi_clk;
        struct clk *pad_clk;
        void __iomem *nfi_base;
        int irq;
        struct completion op_done;
        const struct mtk_snand_caps *caps;
        struct mtk_ecc_config *ecc_cfg;
        struct mtk_ecc *ecc;
        struct mtk_snand_conf nfi_cfg;
        struct mtk_ecc_stats ecc_stats;
        struct nand_ecc_engine ecc_eng;
        bool autofmt;
        u8 *buf;
        size_t buf_len;
};

static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
        struct nand_ecc_engine *eng = nand->ecc.engine;

        return container_of(eng, struct mtk_snand, ecc_eng);
}

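// Make sure the bounce buffer can hold at least @size bytes, reallocating it
// if necessary. A newly (re)allocated buffer is filled with 0xff.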
static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
        if (snf->buf_len >= size)
                return 0;
        kfree(snf->buf);
        snf->buf = kmalloc(size, GFP_KERNEL);
        if (!snf->buf)
                return -ENOMEM;
        snf->buf_len = size;
        memset(snf->buf, 0xff, snf->buf_len);
        return 0;
}

static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
        return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
        writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
        writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
        u32 val;

        val = readl(snf->nfi_base + reg);
        val &= ~clr;
        val |= set;
        writel(val, snf->nfi_base + reg);
}
static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
        u32 i, val = 0, es = sizeof(u32);

        for (i = reg; i < reg + len; i++) {
                if (i == reg || i % es == 0)
                        val = nfi_read32(snf, i & ~(es - 1));

                *data++ = (u8)(val >> (8 * (i % es)));
        }
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
        u32 val, fifo_mask;
        int ret;

        nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

        ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
                                 !(val & snf->caps->mastersta_mask), 0,
                                 SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "NFI master is still busy after reset\n");
                return ret;
        }

        ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
                                 !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
                                 SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "Failed to reset NFI\n");
                return ret;
        }

        fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
                    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
        ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
                                 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "NFI FIFOs are not empty\n");
                return ret;
        }

        return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
        int ret;
        u32 val;

        nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

        ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
                                 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
        if (ret)
                dev_err(snf->dev, "Failed to reset SNFI MAC\n");

        nfi_write32(snf, SNF_MISC_CTL,
                    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));

        return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
        int ret;
        u32 val;

        nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
        nfi_write32(snf, SNF_MAC_OUTL, outlen);
        nfi_write32(snf, SNF_MAC_INL, inlen);

        nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

        ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
                                 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
                goto cleanup;
        }

        ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
                                 0, SNFI_POLL_INTERVAL);
        if (ret)
                dev_err(snf->dev, "Timed out waiting for WIP cleared\n");

cleanup:
        nfi_write32(snf, SNF_MAC_CTL, 0);

        return ret;
}

static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
        u32 rx_len = 0;
        u32 reg_offs = 0;
        u32 val = 0;
        const u8 *tx_buf = NULL;
        u8 *rx_buf = NULL;
        int i, ret;
        u8 b;

        if (op->data.dir == SPI_MEM_DATA_IN) {
                rx_len = op->data.nbytes;
                rx_buf = op->data.buf.in;
        } else {
                tx_buf = op->data.buf.out;
        }

        mtk_snand_mac_reset(snf);

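        // Pack the command, address, dummy and TX data bytes into the GPRAM,
        // four bytes per 32-bit register, least-significant byte first.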
        for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
                b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
                val |= b << (8 * (reg_offs % 4));
                if (reg_offs % 4 == 3) {
                        nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
                        val = 0;
                }
        }

        for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
                b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
                val |= b << (8 * (reg_offs % 4));
                if (reg_offs % 4 == 3) {
                        nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
                        val = 0;
                }
        }

        for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
                if (reg_offs % 4 == 3) {
                        nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
                        val = 0;
                }
        }

        if (op->data.dir == SPI_MEM_DATA_OUT) {
                for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
                        val |= tx_buf[i] << (8 * (reg_offs % 4));
                        if (reg_offs % 4 == 3) {
                                nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
                                val = 0;
                        }
                }
        }

        if (reg_offs % 4)
                nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);

        for (i = 0; i < reg_offs; i += 4)
                dev_dbg(snf->dev, "%d: %08X", i,
                        nfi_read32(snf, SNF_GPRAM + i));

        dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);

        ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
        if (ret)
                return ret;

        if (!rx_len)
                return 0;

        nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
        return 0;
}

static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
                                   u32 oob_size)
{
        int spare_idx = -1;
        u32 spare_size, spare_size_shift, pagesize_idx;
        u32 sector_size_512;
        u8 nsectors;
        int i;

        // skip if it's already configured as required.
        if (snf->nfi_cfg.page_size == page_size &&
            snf->nfi_cfg.oob_size == oob_size)
                return 0;

        nsectors = page_size / snf->caps->sector_size;
        if (nsectors > snf->caps->max_sectors) {
                dev_err(snf->dev, "too many sectors required.\n");
                goto err;
        }

        if (snf->caps->sector_size == 512) {
                sector_size_512 = NFI_SEC_SEL_512;
                spare_size_shift = NFI_SPARE_SIZE_S;
        } else {
                sector_size_512 = 0;
                spare_size_shift = NFI_SPARE_SIZE_LS_S;
        }

        switch (page_size) {
        case SZ_512:
                pagesize_idx = NFI_PAGE_SIZE_512_2K;
                break;
        case SZ_2K:
                if (snf->caps->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_2K_4K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_512_2K;
                break;
        case SZ_4K:
                if (snf->caps->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_4K_8K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_2K_4K;
                break;
        case SZ_8K:
                if (snf->caps->sector_size == 512)
                        pagesize_idx = NFI_PAGE_SIZE_8K_16K;
                else
                        pagesize_idx = NFI_PAGE_SIZE_4K_8K;
                break;
        case SZ_16K:
                pagesize_idx = NFI_PAGE_SIZE_8K_16K;
                break;
        default:
                dev_err(snf->dev, "unsupported page size.\n");
                goto err;
        }

        spare_size = oob_size / nsectors;
        // If we're using the 1KB sector size, HW will automatically double the
        // spare size. We should only use half of the value in this case.
        if (snf->caps->sector_size == 1024)
                spare_size /= 2;

        for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
                if (snf->caps->spare_sizes[i] <= spare_size) {
                        spare_size = snf->caps->spare_sizes[i];
                        if (snf->caps->sector_size == 1024)
                                spare_size *= 2;
                        spare_idx = i;
                        break;
                }
        }

        if (spare_idx < 0) {
                dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
                goto err;
        }

        nfi_write32(snf, NFI_PAGEFMT,
                    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
                            (snf->caps->fdm_size << NFI_FDM_NUM_S) |
                            (spare_idx << spare_size_shift) |
                            (pagesize_idx << NFI_PAGE_SIZE_S) |
                            sector_size_512);

        snf->nfi_cfg.page_size = page_size;
        snf->nfi_cfg.oob_size = oob_size;
        snf->nfi_cfg.nsectors = nsectors;
        snf->nfi_cfg.spare_size = spare_size;

        dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
                snf->caps->sector_size, spare_size, nsectors);
        return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
        dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
                oob_size);
        return -EOPNOTSUPP;
}

static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
                                   struct mtd_oob_region *oobecc)
{
        // ECC area is not accessible
        return -ERANGE;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
                                    struct mtd_oob_region *oobfree)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct mtk_snand *ms = nand_to_mtk_snand(nand);

        if (section >= ms->nfi_cfg.nsectors)
                return -ERANGE;

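        // The first FDM byte of each sector is reserved: it holds the BBM in
        // the first sector and, on BBM-swap chips, the displaced page-data
        // byte in the last sector (see the notes at the top of this file).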
        oobfree->length = ms->caps->fdm_size - 1;
        oobfree->offset = section * ms->caps->fdm_size + 1;
        return 0;
}

static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
        .ecc = mtk_snand_ooblayout_ecc,
        .free = mtk_snand_ooblayout_free,
};

static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
{
        struct mtk_snand *snf = nand_to_mtk_snand(nand);
        struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
        struct nand_ecc_props *reqs = &nand->ecc.requirements;
        struct nand_ecc_props *user = &nand->ecc.user_conf;
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        int step_size = 0, strength = 0, desired_correction = 0, steps;
        bool ecc_user = false;
        int ret;
        u32 parity_bits, max_ecc_bytes;
        struct mtk_ecc_config *ecc_cfg;

        ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
                                      nand->memorg.oobsize);
        if (ret)
                return ret;

        ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
        if (!ecc_cfg)
                return -ENOMEM;

        nand->ecc.ctx.priv = ecc_cfg;

        if (user->step_size && user->strength) {
                step_size = user->step_size;
                strength = user->strength;
                ecc_user = true;
        } else if (reqs->step_size && reqs->strength) {
                step_size = reqs->step_size;
                strength = reqs->strength;
        }

        if (step_size && strength) {
                steps = mtd->writesize / step_size;
                desired_correction = steps * strength;
                strength = desired_correction / snf->nfi_cfg.nsectors;
        }

        ecc_cfg->mode = ECC_NFI_MODE;
        ecc_cfg->sectors = snf->nfi_cfg.nsectors;
        ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;

        // calculate the max possible strength under the current page format
        parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
        max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
        ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
        mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);

        // If there's a user-requested strength, find the minimum strength
        // that meets the requirement. Otherwise use the maximum strength,
        // which is expected by the BootROM.
        if (ecc_user && strength) {
                u32 s_next = ecc_cfg->strength - 1;

                while (1) {
                        mtk_ecc_adjust_strength(snf->ecc, &s_next);
                        if (s_next >= ecc_cfg->strength)
                                break;
                        if (s_next < strength)
                                break;
                        // accept this lower strength and keep searching
                        ecc_cfg->strength = s_next;
                        s_next = ecc_cfg->strength - 1;
                }
        }

        mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);

        conf->step_size = snf->caps->sector_size;
        conf->strength = ecc_cfg->strength;

        if (ecc_cfg->strength < strength)
                dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
                         strength);
        dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
                 ecc_cfg->strength, snf->caps->sector_size);

        return 0;
}

static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
        struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);

        kfree(ecc_cfg);
}

static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
                                        struct nand_page_io_req *req)
{
        struct mtk_snand *snf = nand_to_mtk_snand(nand);
        struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
        int ret;

        ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
                                      nand->memorg.oobsize);
        if (ret)
                return ret;
        snf->autofmt = true;
        snf->ecc_cfg = ecc_cfg;
        return 0;
}

static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
                                       struct nand_page_io_req *req)
{
        struct mtk_snand *snf = nand_to_mtk_snand(nand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);

        snf->ecc_cfg = NULL;
        snf->autofmt = false;
        if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
                return 0;

        if (snf->ecc_stats.failed)
                mtd->ecc_stats.failed += snf->ecc_stats.failed;
        mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
        return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
}

static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
        .init_ctx = mtk_snand_ecc_init_ctx,
        .cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
        .prepare_io_req = mtk_snand_ecc_prepare_io_req,
        .finish_io_req = mtk_snand_ecc_finish_io_req,
};

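// Copy the FDM (OOB) bytes of every sector from the NFI_FDML/NFI_FDMM
// register pairs into @buf, fdm_size bytes per sector.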
static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
        u32 vall, valm;
        u8 *oobptr = buf;
        int i, j;

        for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
                vall = nfi_read32(snf, NFI_FDML(i));
                valm = nfi_read32(snf, NFI_FDMM(i));

                for (j = 0; j < snf->caps->fdm_size; j++)
                        oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

                oobptr += snf->caps->fdm_size;
        }
}

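// Load the FDM (OOB) bytes of every sector from @buf into the
// NFI_FDML/NFI_FDMM register pairs, padding missing bytes with 0xff.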
static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
{
        u32 fdm_size = snf->caps->fdm_size;
        const u8 *oobptr = buf;
        u32 vall, valm;
        int i, j;

        for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
                vall = 0;
                valm = 0;

                for (j = 0; j < 8; j++) {
                        if (j < 4)
                                vall |= (j < fdm_size ? oobptr[j] : 0xff)
                                        << (j * 8);
                        else
                                valm |= (j < fdm_size ? oobptr[j] : 0xff)
                                        << ((j - 4) * 8);
                }

                nfi_write32(snf, NFI_FDML(i), vall);
                nfi_write32(snf, NFI_FDMM(i), valm);

                oobptr += fdm_size;
        }
}

static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
        u32 buf_bbm_pos, fdm_bbm_pos;

        if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
                return;

        // swap the [pagesize] byte on the nand page with the first fdm byte
        // in the last sector.
        buf_bbm_pos = snf->nfi_cfg.page_size -
                      (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
        fdm_bbm_pos = snf->nfi_cfg.page_size +
                      (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;

        swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
        u32 fdm_bbm_pos1, fdm_bbm_pos2;

        if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
                return;

        // swap the first fdm byte in the first and the last sector.
        fdm_bbm_pos1 = snf->nfi_cfg.page_size;
        fdm_bbm_pos2 = snf->nfi_cfg.page_size +
                       (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
        swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}

static int mtk_snand_read_page_cache(struct mtk_snand *snf,
                                     const struct spi_mem_op *op)
{
        u8 *buf = snf->buf;
        u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
        // the address part to be sent by the controller
        u32 op_addr = op->addr.val;
        // where to start copying data from the bounce buffer
        u32 rd_offset = 0;
        u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
        u32 op_mode = 0;
        u32 dma_len = snf->buf_len;
        int ret = 0;
        u32 rd_mode, rd_bytes, val;
        dma_addr_t buf_dma;

        if (snf->autofmt) {
                u32 last_bit;
                u32 mask;

                dma_len = snf->nfi_cfg.page_size;
                op_mode = CNFG_AUTO_FMT_EN;
                if (op->data.ecc)
                        op_mode |= CNFG_HW_ECC_EN;
                // extract the plane bit:
                // Find the highest bit set in (pagesize+oobsize).
                // Bits higher than that in op->addr are kept and sent over
                // SPI. Lower bits are used as an offset for copying data from
                // the DMA bounce buffer.
                last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
                mask = (1 << last_bit) - 1;
                rd_offset = op_addr & mask;
                op_addr &= ~mask;

                // check if we can dma to the caller memory
                if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
                        buf = op->data.buf.in;
        }
        mtk_snand_mac_reset(snf);
        mtk_nfi_reset(snf);

        // command and dummy cycles
        nfi_write32(snf, SNF_RD_CTL2,
                    (dummy_clk << DATA_READ_DUMMY_S) |
                            (op->cmd.opcode << DATA_READ_CMD_S));

        // read address
        nfi_write32(snf, SNF_RD_CTL3, op_addr);

        // Set read op_mode
        if (op->data.buswidth == 4)
                rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
                                                   DATA_READ_MODE_X4;
        else if (op->data.buswidth == 2)
                rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
                                                   DATA_READ_MODE_X2;
        else
                rd_mode = DATA_READ_MODE_X1;
        rd_mode <<= DATA_READ_MODE_S;
        nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
                  rd_mode | DATARD_CUSTOM_EN);

        // Set bytes to read
        rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
                   snf->nfi_cfg.nsectors;
        nfi_write32(snf, SNF_MISC_CTL2,
                    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);

        // NFI read prepare
        nfi_write16(snf, NFI_CNFG,
                    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
                            CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);

        nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));

        buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
        ret = dma_mapping_error(snf->dev, buf_dma);
        if (ret) {
                dev_err(snf->dev, "DMA mapping failed.\n");
                goto cleanup;
        }
        nfi_write32(snf, NFI_STRADDR, buf_dma);
        if (op->data.ecc) {
                snf->ecc_cfg->op = ECC_DECODE;
                ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
                if (ret)
                        goto cleanup_dma;
        }
        // Prepare for custom read interrupt
        nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
        reinit_completion(&snf->op_done);

        // Trigger NFI into custom mode
        nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

        // Start DMA read
        nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
        nfi_write16(snf, NFI_STRDATA, STR_DATA);

        if (!wait_for_completion_timeout(
                    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
                dev_err(snf->dev, "DMA timed out for reading from cache.\n");
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        // Wait for BUS_SEC_CNTR returning the expected value
        ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
                                 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
                                 SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
                goto cleanup2;
        }

        // Wait for the bus to become idle
        ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
                                 !(val & snf->caps->mastersta_mask), 0,
                                 SNFI_POLL_INTERVAL);
        if (ret) {
                dev_err(snf->dev, "Timed out waiting for the bus to become idle\n");
                goto cleanup2;
        }

        if (op->data.ecc) {
                ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
                if (ret) {
                        dev_err(snf->dev, "wait ecc done timeout\n");
                        goto cleanup2;
                }
                // save status before disabling ecc
                mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
                                  snf->nfi_cfg.nsectors);
        }

        dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);

        if (snf->autofmt) {
                mtk_snand_read_fdm(snf, buf_fdm);
                if (snf->caps->bbm_swap) {
                        mtk_snand_bm_swap(snf, buf);
                        mtk_snand_fdm_bm_swap(snf);
                }
        }

        // copy data back
        if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
                memset(op->data.buf.in, 0xff, op->data.nbytes);
                snf->ecc_stats.bitflips = 0;
                snf->ecc_stats.failed = 0;
                snf->ecc_stats.corrected = 0;
        } else {
                if (buf == op->data.buf.in) {
                        u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
                        u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;

                        if (req_left)
                                memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
                                       buf_fdm,
                                       cap_len < req_left ? cap_len : req_left);
                } else if (rd_offset < snf->buf_len) {
                        u32 cap_len = snf->buf_len - rd_offset;

                        if (op->data.nbytes < cap_len)
                                cap_len = op->data.nbytes;
                        memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
                }
        }
cleanup2:
        if (op->data.ecc)
                mtk_ecc_disable(snf->ecc);
cleanup_dma:
        // unmap dma only if any error happens. (otherwise it's done before
        // data copying)
        if (ret)
                dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
        // Stop read
        nfi_write32(snf, NFI_CON, 0);
        nfi_write16(snf, NFI_CNFG, 0);

        // Clear SNF done flag
        nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
        nfi_write32(snf, SNF_STA_CTL1, 0);

        // Disable interrupt
        nfi_read32(snf, NFI_INTR_STA);
        nfi_write32(snf, NFI_INTR_EN, 0);

        nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
        return ret;
}

static int mtk_snand_write_page_cache(struct mtk_snand *snf,
                                      const struct spi_mem_op *op)
{
        // the address part to be sent by the controller
        u32 op_addr = op->addr.val;
        // where to start copying data into the bounce buffer
        u32 wr_offset = 0;
        u32 op_mode = 0;
        int ret = 0;
        u32 wr_mode = 0;
        u32 dma_len = snf->buf_len;
        u32 wr_bytes, val;
        size_t cap_len;
        dma_addr_t buf_dma;

        if (snf->autofmt) {
                u32 last_bit;
                u32 mask;

                dma_len = snf->nfi_cfg.page_size;
                op_mode = CNFG_AUTO_FMT_EN;
                if (op->data.ecc)
                        op_mode |= CNFG_HW_ECC_EN;

                last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
                mask = (1 << last_bit) - 1;
                wr_offset = op_addr & mask;
                op_addr &= ~mask;
        }
        mtk_snand_mac_reset(snf);
        mtk_nfi_reset(snf);

        if (wr_offset)
                memset(snf->buf, 0xff, wr_offset);

        cap_len = snf->buf_len - wr_offset;
        if (op->data.nbytes < cap_len)
                cap_len = op->data.nbytes;
        memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
        if (snf->autofmt) {
                if (snf->caps->bbm_swap) {
                        mtk_snand_fdm_bm_swap(snf);
                        mtk_snand_bm_swap(snf, snf->buf);
                }
                mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
        }

        // Command
        nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));

        // write address
        nfi_write32(snf, SNF_PG_CTL2, op_addr);

        // Set program load op_mode
        if (op->data.buswidth == 4)
                wr_mode = PG_LOAD_X4_EN;

        nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
                  wr_mode | PG_LOAD_CUSTOM_EN);

        // Set bytes to write
        wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
                   snf->nfi_cfg.nsectors;
        nfi_write32(snf, SNF_MISC_CTL2,
                    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);

        // NFI write prepare
        nfi_write16(snf, NFI_CNFG,
                    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
                            CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);

        nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
        buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(snf->dev, buf_dma);
        if (ret) {
                dev_err(snf->dev, "DMA mapping failed.\n");
                goto cleanup;
        }
        nfi_write32(snf, NFI_STRADDR, buf_dma);
        if (op->data.ecc) {
                snf->ecc_cfg->op = ECC_ENCODE;
                ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
                if (ret)
                        goto cleanup_dma;
        }
        // Prepare for custom write interrupt
        nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
        reinit_completion(&snf->op_done);

        // Trigger NFI into custom mode
        nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

        // Start DMA write
        nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
        nfi_write16(snf, NFI_STRDATA, STR_DATA);

        if (!wait_for_completion_timeout(
                    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
                dev_err(snf->dev, "DMA timed out for program load.\n");
                ret = -ETIMEDOUT;
                goto cleanup_ecc;
        }

        // Wait for NFI_SEC_CNTR returning the expected value
        ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
                                 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
                                 SNFI_POLL_INTERVAL);
        if (ret)
                dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");

cleanup_ecc:
        if (op->data.ecc)
                mtk_ecc_disable(snf->ecc);
cleanup_dma:
        dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
cleanup:
        // Stop write
        nfi_write32(snf, NFI_CON, 0);
        nfi_write16(snf, NFI_CNFG, 0);

        // Clear SNF done flag
        nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
        nfi_write32(snf, SNF_STA_CTL1, 0);

        // Disable interrupt
        nfi_read32(snf, NFI_INTR_STA);
        nfi_write32(snf, NFI_INTR_EN, 0);

        nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

        return ret;
}

/**
 * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
 * @op: spi-mem op to check
 *
 * Check whether op can be executed with read_from_cache or program_load
 * mode in the controller.
 * This controller can execute typical Read From Cache and Program Load
 * instructions found on SPI-NAND with a 2-byte address.
 * DTR and cmd buswidth & nbytes should be checked before calling this.
 *
 * Return: true if the op matches the instruction template
 */
static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
{
        if (op->addr.nbytes != 2)
                return false;

        if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
            op->addr.buswidth != 4)
                return false;

        // match read from page instructions
        if (op->data.dir == SPI_MEM_DATA_IN) {
                // check dummy cycle first
                if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
                    DATA_READ_MAX_DUMMY)
                        return false;
                // quad io / quad out
                if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
                    op->data.buswidth == 4)
                        return true;

                // dual io / dual out
                if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
                    op->data.buswidth == 2)
                        return true;

                // standard spi
                if (op->addr.buswidth == 1 && op->data.buswidth == 1)
                        return true;
        } else if (op->data.dir == SPI_MEM_DATA_OUT) {
                // check dummy cycle first
                if (op->dummy.nbytes)
                        return false;
                // program load quad out
                if (op->addr.buswidth == 1 && op->data.buswidth == 4)
                        return true;
                // standard spi
                if (op->addr.buswidth == 1 && op->data.buswidth == 1)
                        return true;
        }
        return false;
}

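// Besides the page ops above, only ops with a single 1-bit command byte whose
// address, dummy and data phases are absent or 1-bit wide are supported;
// those go through the SNFI MAC and its GPRAM.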
static bool mtk_snand_supports_op(struct spi_mem *mem,
                                  const struct spi_mem_op *op)
{
        if (!spi_mem_default_supports_op(mem, op))
                return false;
        if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
                return false;
        if (mtk_snand_is_page_ops(op))
                return true;
        return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
                (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
                (op->data.nbytes == 0 || op->data.buswidth == 1));
}

static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
        struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

        // page ops transfer size must be exactly ((sector_size + spare_size) *
        // nsectors). Limit the op size if the caller requests more than that.
        // exec_op will read more than needed and discard the leftover if the
        // caller requests less data.
        if (mtk_snand_is_page_ops(op)) {
                size_t l;
                // skip adjust_op_size for page ops
                if (ms->autofmt)
                        return 0;
                l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
                l *= ms->nfi_cfg.nsectors;
                if (op->data.nbytes > l)
                        op->data.nbytes = l;
        } else {
                size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

                if (hl >= SNF_GPRAM_SIZE)
                        return -EOPNOTSUPP;
                if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
                        op->data.nbytes = SNF_GPRAM_SIZE - hl;
        }
        return 0;
}

static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

        dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
                op->addr.val, op->addr.buswidth, op->addr.nbytes,
                op->data.buswidth, op->data.nbytes);
        if (mtk_snand_is_page_ops(op)) {
                if (op->data.dir == SPI_MEM_DATA_IN)
                        return mtk_snand_read_page_cache(ms, op);
                else
                        return mtk_snand_write_page_cache(ms, op);
        } else {
                return mtk_snand_mac_io(ms, op);
        }
}

static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
        .adjust_op_size = mtk_snand_adjust_op_size,
        .supports_op = mtk_snand_supports_op,
        .exec_op = mtk_snand_exec_op,
};

static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
        .ecc = true,
};

static irqreturn_t mtk_snand_irq(int irq, void *id)
{
        struct mtk_snand *snf = id;
        u32 sta, ien;

        sta = nfi_read32(snf, NFI_INTR_STA);
        ien = nfi_read32(snf, NFI_INTR_EN);

        if (!(sta & ien))
                return IRQ_NONE;

        nfi_write32(snf, NFI_INTR_EN, 0);
        complete(&snf->op_done);
        return IRQ_HANDLED;
}

static const struct of_device_id mtk_snand_ids[] = {
        { .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
        { .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
        { .compatible = "mediatek,mt7986-snand", .data = &mt7986_snand_caps },
        {},
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

static int mtk_snand_enable_clk(struct mtk_snand *ms)
{
        int ret;

        ret = clk_prepare_enable(ms->nfi_clk);
        if (ret) {
                dev_err(ms->dev, "unable to enable nfi clk\n");
                return ret;
        }
        ret = clk_prepare_enable(ms->pad_clk);
        if (ret) {
                dev_err(ms->dev, "unable to enable pad clk\n");
                goto err1;
        }
        return 0;
err1:
        clk_disable_unprepare(ms->nfi_clk);
        return ret;
}

static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
        clk_disable_unprepare(ms->pad_clk);
        clk_disable_unprepare(ms->nfi_clk);
}

static int mtk_snand_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *dev_id;
        struct spi_controller *ctlr;
        struct mtk_snand *ms;
        int ret;

        dev_id = of_match_node(mtk_snand_ids, np);
        if (!dev_id)
                return -EINVAL;

        ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
        if (!ctlr)
                return -ENOMEM;
        platform_set_drvdata(pdev, ctlr);

        ms = spi_controller_get_devdata(ctlr);

        ms->ctlr = ctlr;
        ms->caps = dev_id->data;

        ms->ecc = of_mtk_ecc_get(np);
        if (IS_ERR(ms->ecc))
                return PTR_ERR(ms->ecc);
        else if (!ms->ecc)
                return -ENODEV;

        ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ms->nfi_base)) {
                ret = PTR_ERR(ms->nfi_base);
                goto release_ecc;
        }

        ms->dev = &pdev->dev;

        ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
        if (IS_ERR(ms->nfi_clk)) {
                ret = PTR_ERR(ms->nfi_clk);
                dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
                goto release_ecc;
        }

        ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
        if (IS_ERR(ms->pad_clk)) {
                ret = PTR_ERR(ms->pad_clk);
                dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
                goto release_ecc;
        }

        ret = mtk_snand_enable_clk(ms);
        if (ret)
                goto release_ecc;

        init_completion(&ms->op_done);

        ms->irq = platform_get_irq(pdev, 0);
        if (ms->irq < 0) {
                ret = ms->irq;
                goto disable_clk;
        }
        ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
                               "mtk-snand", ms);
        if (ret) {
                dev_err(ms->dev, "failed to request snfi irq\n");
                goto disable_clk;
        }

        ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(ms->dev, "failed to set dma mask\n");
                goto disable_clk;
        }

        // switch to SNFI mode
        nfi_write32(ms, SNF_CFG, SPI_MODE);

        // setup an initial page format for ops matching the page_cache_op
        // template before ECC is called.
        ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
                                      ms->caps->spare_sizes[0]);
        if (ret) {
                dev_err(ms->dev, "failed to set initial page format\n");
                goto disable_clk;
        }

        // setup ECC engine
        ms->ecc_eng.dev = &pdev->dev;
        ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
        ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
        ms->ecc_eng.priv = ms;

        ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
        if (ret) {
                dev_err(&pdev->dev, "failed to register ecc engine.\n");
                goto disable_clk;
        }

        ctlr->num_chipselect = 1;
        ctlr->mem_ops = &mtk_snand_mem_ops;
        ctlr->mem_caps = &mtk_snand_mem_caps;
        ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
        ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
        ctlr->dev.of_node = pdev->dev.of_node;
        ret = spi_register_controller(ctlr);
        if (ret) {
                dev_err(&pdev->dev, "spi_register_controller failed.\n");
                goto disable_clk;
        }

        return 0;
disable_clk:
        mtk_snand_disable_clk(ms);
release_ecc:
        mtk_ecc_release(ms->ecc);
        return ret;
}

static int mtk_snand_remove(struct platform_device *pdev)
{
        struct spi_controller *ctlr = platform_get_drvdata(pdev);
        struct mtk_snand *ms = spi_controller_get_devdata(ctlr);

        spi_unregister_controller(ctlr);
        mtk_snand_disable_clk(ms);
        mtk_ecc_release(ms->ecc);
        kfree(ms->buf);
        return 0;
}

static struct platform_driver mtk_snand_driver = {
        .probe = mtk_snand_probe,
        .remove = mtk_snand_remove,
        .driver = {
                .name = "mtk-snand",
                .of_match_table = mtk_snand_ids,
        },
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");