1 // SPDX-License-Identifier: GPL-2.0
3 * Marvell NAND flash controller driver
5 * Copyright (C) 2017 Marvell
6 * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com>
9 * This NAND controller driver handles two versions of the hardware,
10 * one is called NFCv1 and is available on PXA SoCs and the other is
11 * called NFCv2 and is available on Armada SoCs.
13 * The main visible difference is that NFCv1 only has Hamming ECC
14 * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA
15 * is not used with NFCv2.
17 * The ECC layouts are depicted in details in Marvell AN-379, but here
18 * is a brief description.
20 * When using Hamming, the data is split in 512B chunks (either 1, 2
21 * or 4) and each chunk will have its own ECC "digest" of 6B at the
22 * beginning of the OOB area and eventually the remaining free OOB
23 * bytes (also called "spare" bytes in the driver). This engine
 * corrects up to 1 bit per chunk and reliably detects errors of at
 * most 2 bitflips. Here is the page layout used by the
26 * controller when Hamming is chosen:
28 * +-------------------------------------------------------------+
 * | Data 1 | ... | Data N | ECC 1 | ... | ECC N | Free OOB bytes |
30 * +-------------------------------------------------------------+
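 *
 * As a worked example (numbers matching the 2048B Hamming entry of the
 * marvell_nfc_layouts table below): on a 2048B page with 64B OOB, the data
 * is split in four 512B chunks, each producing a 6B ECC digest, so the OOB
 * area holds 4 * 6B = 24B of ECC and 64 - 24 = 40B of free "spare" bytes
 * remain.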
32 * When using the BCH engine, there are N identical (data + free OOB +
33 * ECC) sections and potentially an extra one to deal with
34 * configurations where the chosen (data + free OOB + ECC) sizes do
35 * not align with the page (data + OOB) size. ECC bytes are always
 * 30B per ECC chunk. Here is the page layout used by the controller
 * when BCH is chosen:
39 * +-----------------------------------------
40 * | Data 1 | Free OOB bytes 1 | ECC 1 | ...
41 * +-----------------------------------------
43 * -------------------------------------------
44 * ... | Data N | Free OOB bytes N | ECC N |
45 * -------------------------------------------
47 * --------------------------------------------+
48 * Last Data | Last Free OOB bytes | Last ECC |
49 * --------------------------------------------+
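 *
 * As a worked example (the 4096B page / 8-bit strength entry of the
 * marvell_nfc_layouts table below): four full chunks of
 * (1024B data + 0B free OOB + 30B ECC) are followed by a last chunk of
 * (0B data + 64B free OOB + 30B ECC), i.e. five chunks in total.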
51 * In both cases, the layout seen by the user is always: all data
52 * first, then all free OOB bytes and finally all ECC bytes. With BCH,
 * ECC bytes are 30B long and are padded with 0xFF to align on 32
 * bytes.
 * The controller has certain limitations that are handled by the
 * driver:
58 * - It can only read 2k at a time. To overcome this limitation, the
59 * driver issues data cycles on the bus, without issuing new
60 * CMD + ADDR cycles. The Marvell term is "naked" operations.
 * - The ECC strength in BCH mode cannot be tuned. It is fixed at 16
 *   bits. What can be tuned is the ECC block size as long as it
63 * stays between 512B and 2kiB. It's usually chosen based on the
64 * chip ECC requirements. For instance, using 2kiB ECC chunks
65 * provides 4b/512B correctability.
66 * - The controller will always treat data bytes, free OOB bytes
67 * and ECC bytes in that order, no matter what the real layout is
68 * (which is usually all data then all OOB bytes). The
 *   marvell_nfc_layouts array below contains the currently
 *   supported layouts.
71 * - Because of these weird layouts, the Bad Block Markers can be
72 * located in data section. In this case, the NAND_BBT_NO_OOB_BBM
 *   option must be set to prevent scanning/writing bad block
 *   markers.
77 #include <linux/module.h>
78 #include <linux/clk.h>
79 #include <linux/mtd/rawnand.h>
80 #include <linux/of_platform.h>
81 #include <linux/iopoll.h>
82 #include <linux/interrupt.h>
83 #include <linux/slab.h>
84 #include <linux/mfd/syscon.h>
85 #include <linux/regmap.h>
86 #include <asm/unaligned.h>
88 #include <linux/dmaengine.h>
89 #include <linux/dma-mapping.h>
90 #include <linux/dma/pxa-dma.h>
91 #include <linux/platform_data/mtd-nand-pxa3xx.h>
/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH		8
#define FIFO_REP(x)		(x / sizeof(u32))
96 #define BCH_SEQ_READS (32 / FIFO_DEPTH)
97 /* NFC does not support transfers of larger chunks at a time */
98 #define MAX_CHUNK_SIZE 2112
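
/*
 * Minimal sketch (hypothetical helper, not used by the driver) of how the
 * 2kiB transfer limit described in the header translates into a number of
 * "naked" transfers for a given (data + OOB) length:
 */
static inline unsigned int marvell_nfc_xfer_count_sketch(unsigned int len)
{
	/* Each naked read/write moves at most MAX_CHUNK_SIZE bytes */
	return DIV_ROUND_UP(len, MAX_CHUNK_SIZE);
}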
/* NFCv1 cannot read more than 7 bytes of ID */
100 #define NFCV1_READID_LEN 7
101 /* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
102 #define POLL_PERIOD 0
103 #define POLL_TIMEOUT 100000
104 /* Interrupt maximum wait period in ms */
105 #define IRQ_TIMEOUT 1000
106 /* Latency in clock cycles between SoC pins and NFC logic */
107 #define MIN_RD_DEL_CNT 3
108 /* Maximum number of contiguous address cycles */
109 #define MAX_ADDRESS_CYC_NFCV1 5
110 #define MAX_ADDRESS_CYC_NFCV2 7
111 /* System control registers/bits to enable the NAND controller on some SoCs */
112 #define GENCONF_SOC_DEVICE_MUX 0x208
113 #define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
114 #define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST BIT(20)
115 #define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST BIT(21)
116 #define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN BIT(25)
117 #define GENCONF_CLK_GATING_CTRL 0x220
118 #define GENCONF_CLK_GATING_CTRL_ND_GATE BIT(2)
119 #define GENCONF_ND_CLK_CTRL 0x700
120 #define GENCONF_ND_CLK_CTRL_EN BIT(0)
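
/*
 * These bits live in the system controller and are poked through a syscon
 * regmap at probe time. A sketch, assuming a "sysctrl" regmap handle
 * obtained with syscon_regmap_lookup_by_phandle():
 *
 *	regmap_update_bits(sysctrl, GENCONF_CLK_GATING_CTRL,
 *			   GENCONF_CLK_GATING_CTRL_ND_GATE,
 *			   GENCONF_CLK_GATING_CTRL_ND_GATE);
 */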
122 /* NAND controller data flash control register */
124 #define NDCR_ALL_INT GENMASK(11, 0)
125 #define NDCR_CS1_CMDDM BIT(7)
126 #define NDCR_CS0_CMDDM BIT(8)
127 #define NDCR_RDYM BIT(11)
128 #define NDCR_ND_ARB_EN BIT(12)
129 #define NDCR_RA_START BIT(15)
130 #define NDCR_RD_ID_CNT(x) (min_t(unsigned int, x, 0x7) << 16)
131 #define NDCR_PAGE_SZ(x) (x >= 2048 ? BIT(24) : 0)
132 #define NDCR_DWIDTH_M BIT(26)
133 #define NDCR_DWIDTH_C BIT(27)
134 #define NDCR_ND_RUN BIT(28)
135 #define NDCR_DMA_EN BIT(29)
136 #define NDCR_ECC_EN BIT(30)
137 #define NDCR_SPARE_EN BIT(31)
138 #define NDCR_GENERIC_FIELDS_MASK (~(NDCR_RA_START | NDCR_PAGE_SZ(2048) | \
139 NDCR_DWIDTH_M | NDCR_DWIDTH_C))
141 /* NAND interface timing parameter 0 register */
143 #define NDTR0_TRP(x) ((min_t(unsigned int, x, 0xF) & 0x7) << 0)
144 #define NDTR0_TRH(x) (min_t(unsigned int, x, 0x7) << 3)
145 #define NDTR0_ETRP(x) ((min_t(unsigned int, x, 0xF) & 0x8) << 3)
146 #define NDTR0_SEL_NRE_EDGE BIT(7)
147 #define NDTR0_TWP(x) (min_t(unsigned int, x, 0x7) << 8)
148 #define NDTR0_TWH(x) (min_t(unsigned int, x, 0x7) << 11)
149 #define NDTR0_TCS(x) (min_t(unsigned int, x, 0x7) << 16)
150 #define NDTR0_TCH(x) (min_t(unsigned int, x, 0x7) << 19)
151 #define NDTR0_RD_CNT_DEL(x) (min_t(unsigned int, x, 0xF) << 22)
152 #define NDTR0_SELCNTR BIT(26)
153 #define NDTR0_TADL(x) (min_t(unsigned int, x, 0x1F) << 27)
155 /* NAND interface timing parameter 1 register */
157 #define NDTR1_TAR(x) (min_t(unsigned int, x, 0xF) << 0)
158 #define NDTR1_TWHR(x) (min_t(unsigned int, x, 0xF) << 4)
159 #define NDTR1_TRHW(x) (min_t(unsigned int, x / 16, 0x3) << 8)
160 #define NDTR1_PRESCALE BIT(14)
161 #define NDTR1_WAIT_MODE BIT(15)
162 #define NDTR1_TR(x) (min_t(unsigned int, x, 0xFFFF) << 16)
164 /* NAND controller status register */
166 #define NDSR_WRCMDREQ BIT(0)
167 #define NDSR_RDDREQ BIT(1)
168 #define NDSR_WRDREQ BIT(2)
169 #define NDSR_CORERR BIT(3)
170 #define NDSR_UNCERR BIT(4)
171 #define NDSR_CMDD(cs) BIT(8 - cs)
172 #define NDSR_RDY(rb) BIT(11 + rb)
173 #define NDSR_ERRCNT(x) ((x >> 16) & 0x1F)
175 /* NAND ECC control register */
176 #define NDECCCTRL 0x28
177 #define NDECCCTRL_BCH_EN BIT(0)
179 /* NAND controller data buffer register */
182 /* NAND controller command buffer 0 register */
184 #define NDCB0_CMD1(x) ((x & 0xFF) << 0)
185 #define NDCB0_CMD2(x) ((x & 0xFF) << 8)
186 #define NDCB0_ADDR_CYC(x) ((x & 0x7) << 16)
187 #define NDCB0_ADDR_GET_NUM_CYC(x) (((x) >> 16) & 0x7)
188 #define NDCB0_DBC BIT(19)
189 #define NDCB0_CMD_TYPE(x) ((x & 0x7) << 21)
190 #define NDCB0_CSEL BIT(24)
191 #define NDCB0_RDY_BYP BIT(27)
192 #define NDCB0_LEN_OVRD BIT(28)
193 #define NDCB0_CMD_XTYPE(x) ((x & 0x7) << 29)
195 /* NAND controller command buffer 1 register */
197 #define NDCB1_COLS(x) ((x & 0xFFFF) << 0)
198 #define NDCB1_ADDRS_PAGE(x) (x << 16)
200 /* NAND controller command buffer 2 register */
202 #define NDCB2_ADDR5_PAGE(x) (((x >> 16) & 0xFF) << 0)
203 #define NDCB2_ADDR5_CYC(x) ((x & 0xFF) << 0)
205 /* NAND controller command buffer 3 register */
207 #define NDCB3_ADDR6_CYC(x) ((x & 0xFF) << 16)
208 #define NDCB3_ADDR7_CYC(x) ((x & 0xFF) << 24)
210 /* NAND controller command buffer 0 register 'type' and 'xtype' fields */
214 #define TYPE_READ_ID 3
215 #define TYPE_STATUS 4
217 #define TYPE_NAKED_CMD 6
218 #define TYPE_NAKED_ADDR 7
220 #define XTYPE_MONOLITHIC_RW 0
221 #define XTYPE_LAST_NAKED_RW 1
222 #define XTYPE_FINAL_COMMAND 3
224 #define XTYPE_WRITE_DISPATCH 4
225 #define XTYPE_NAKED_RW 5
226 #define XTYPE_COMMAND_DISPATCH 6
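
/*
 * Example encoding, mirroring the Hamming page read helper below: a
 * READ0/READSTART page read on a chip needing five address cycles would use:
 *
 *	ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) | NDCB0_ADDR_CYC(5) |
 *		  NDCB0_DBC | NDCB0_CMD1(NAND_CMD_READ0) |
 *		  NDCB0_CMD2(NAND_CMD_READSTART);
 *	ndcb[1] = NDCB1_ADDRS_PAGE(page);
 *	ndcb[2] = NDCB2_ADDR5_PAGE(page);
 */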
 * The Marvell ECC engine works differently than the others: in order to limit
 * the size of the IP, hardware engineers chose to set a fixed strength of 16
 * bits per subpage. Depending on the desired strength needed by the NAND chip,
 * a particular layout mixing data/spare/ecc is defined, with a possible last
 * chunk smaller than the others.
236 * @writesize: Full page size on which the layout applies
237 * @chunk: Desired ECC chunk size on which the layout applies
 * @strength:		Desired ECC strength (per chunk size bytes) on which the
 *			layout applies
240 * @nchunks: Total number of chunks
241 * @full_chunk_cnt: Number of full-sized chunks, which is the number of
242 * repetitions of the pattern:
243 * (data_bytes + spare_bytes + ecc_bytes).
244 * @data_bytes: Number of data bytes per chunk
245 * @spare_bytes: Number of spare bytes per chunk
246 * @ecc_bytes: Number of ecc bytes per chunk
247 * @last_data_bytes: Number of data bytes in the last chunk
248 * @last_spare_bytes: Number of spare bytes in the last chunk
249 * @last_ecc_bytes: Number of ecc bytes in the last chunk
struct marvell_hw_ecc_layout {
	/* Constraints */
	int writesize;
	int chunk;
	int strength;
	/* Corresponding layout */
	int nchunks;
	int full_chunk_cnt;
	int data_bytes;
	int spare_bytes;
	int ecc_bytes;
	int last_data_bytes;
	int last_spare_bytes;
	int last_ecc_bytes;
};
#define MARVELL_LAYOUT(ws, dc, ds, nc, fcc, db, sb, eb, ldb, lsb, leb)	\
	{								\
		.writesize = ws,					\
		.chunk = dc,						\
		.strength = ds,						\
		.nchunks = nc,						\
		.full_chunk_cnt = fcc,					\
		.data_bytes = db,					\
		.spare_bytes = sb,					\
		.ecc_bytes = eb,					\
		.last_data_bytes = ldb,					\
		.last_spare_bytes = lsb,				\
		.last_ecc_bytes = leb,					\
	}
282 /* Layouts explained in AN-379_Marvell_SoC_NFC_ECC */
283 static const struct marvell_hw_ecc_layout marvell_nfc_layouts[] = {
284 MARVELL_LAYOUT( 512, 512, 1, 1, 1, 512, 8, 8, 0, 0, 0),
285 MARVELL_LAYOUT( 2048, 512, 1, 1, 1, 2048, 40, 24, 0, 0, 0),
286 MARVELL_LAYOUT( 2048, 512, 4, 1, 1, 2048, 32, 30, 0, 0, 0),
287 MARVELL_LAYOUT( 4096, 512, 4, 2, 2, 2048, 32, 30, 0, 0, 0),
288 MARVELL_LAYOUT( 4096, 512, 8, 5, 4, 1024, 0, 30, 0, 64, 30),
292 * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
293 * is made by a field in NDCB0 register, and in another field in NDCB2 register.
294 * The datasheet describes the logic with an error: ADDR5 field is once
295 * declared at the beginning of NDCB2, and another time at its end. Because the
296 * ADDR5 field of NDCB2 may be used by other bytes, it would be more logical
297 * to use the last bit of this field instead of the first ones.
299 * @cs: Wanted CE lane.
300 * @ndcb0_csel: Value of the NDCB0 register with or without the flag
301 * selecting the wanted CE lane. This is set once when
302 * the Device Tree is probed.
303 * @rb: Ready/Busy pin for the flash chip
struct marvell_nand_chip_sel {
	unsigned int cs;
	u32 ndcb0_csel;
	unsigned int rb;
};
312 * NAND chip structure: stores NAND chip device related information
314 * @chip: Base NAND chip structure
315 * @node: Used to store NAND chips into a list
 * @layout:		NAND layout when using hardware ECC
317 * @ndcr: Controller register value for this NAND chip
318 * @ndtr0: Timing registers 0 value for this NAND chip
319 * @ndtr1: Timing registers 1 value for this NAND chip
320 * @selected_die: Current active CS
321 * @nsels: Number of CS lines required by the NAND chip
322 * @sels: Array of CS lines descriptions
324 struct marvell_nand_chip {
325 struct nand_chip chip;
326 struct list_head node;
	const struct marvell_hw_ecc_layout *layout;
	u32 ndcr;
	u32 ndtr0;
	u32 ndtr1;
	int selected_die;
	unsigned int nsels;
	struct marvell_nand_chip_sel sels[0];
};
337 static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
339 return container_of(chip, struct marvell_nand_chip, chip);
static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
							*nand)
345 return &nand->sels[nand->selected_die];
349 * NAND controller capabilities for distinction between compatible strings
351 * @max_cs_nb: Number of Chip Select lines available
352 * @max_rb_nb: Number of Ready/Busy lines available
353 * @need_system_controller: Indicates if the SoC needs to have access to the
354 * system controller (ie. to enable the NAND controller)
 * @legacy_of_bindings:	Indicates if DT parsing must be done using the old
 *			bindings
357 * @is_nfcv2: NFCv2 has numerous enhancements compared to NFCv1, ie.
358 * BCH error detection and correction algorithm,
359 * NDCB3 register has been added
360 * @use_dma: Use dma for data transfers
362 struct marvell_nfc_caps {
363 unsigned int max_cs_nb;
364 unsigned int max_rb_nb;
365 bool need_system_controller;
	bool legacy_of_bindings;
	bool is_nfcv2;
	bool use_dma;
};
372 * NAND controller structure: stores Marvell NAND controller information
374 * @controller: Base controller structure
375 * @dev: Parent device (used to print error messages)
376 * @regs: NAND controller registers
377 * @core_clk: Core clock
 * @reg_clk:		Registers clock
379 * @complete: Completion object to wait for NAND controller events
380 * @assigned_cs: Bitmask describing already assigned CS lines
381 * @chips: List containing all the NAND chips attached to
382 * this NAND controller
383 * @caps: NAND controller capabilities for each compatible string
384 * @dma_chan: DMA channel (NFCv1 only)
385 * @dma_buf: 32-bit aligned buffer for DMA transfers (NFCv1 only)
struct marvell_nfc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk *reg_clk;
	struct completion complete;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	const struct marvell_nfc_caps *caps;

	/* DMA (NFCv1 only) */
	bool use_dma;
	struct dma_chan *dma_chan;
	u8 *dma_buf;
};
405 static inline struct marvell_nfc *to_marvell_nfc(struct nand_controller *ctrl)
407 return container_of(ctrl, struct marvell_nfc, controller);
411 * NAND controller timings expressed in NAND Controller clock cycles
413 * @tRP: ND_nRE pulse width
414 * @tRH: ND_nRE high duration
415 * @tWP: ND_nWE pulse time
416 * @tWH: ND_nWE high duration
417 * @tCS: Enable signal setup time
418 * @tCH: Enable signal hold time
419 * @tADL: Address to write data delay
420 * @tAR: ND_ALE low to ND_nRE low delay
421 * @tWHR: ND_nWE high to ND_nRE low for status read
422 * @tRHW: ND_nRE high duration, read to write delay
423 * @tR: ND_nWE high to ND_nRE low for read
struct marvell_nfc_timings {
	unsigned int tRP;
	unsigned int tRH;
	unsigned int tWP;
	unsigned int tWH;
	unsigned int tCS;
	unsigned int tCH;
	unsigned int tADL;
	unsigned int tAR;
	unsigned int tWHR;
	unsigned int tRHW;
	unsigned int tR;
};
442 * Derives a duration in numbers of clock cycles.
444 * @ps: Duration in pico-seconds
445 * @period_ns: Clock period in nano-seconds
 * Converts the duration to nanoseconds, then divides by the clock period
 * and returns the resulting number of clock cycles.
450 #define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP(ps / 1000, period_ns))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
						     period_ns))
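
/*
 * For instance, with tRP = 15 ns (15000 ps) and a 10 ns clock period:
 * TO_CYCLES(15000, 10) = DIV_ROUND_UP(15, 10) = 2 clock cycles.
 */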
455 * NAND driver structure filled during the parsing of the ->exec_op() subop
456 * subset of instructions.
458 * @ndcb: Array of values written to NDCBx registers
459 * @cle_ale_delay_ns: Optional delay after the last CMD or ADDR cycle
460 * @rdy_timeout_ms: Timeout for waits on Ready/Busy pin
461 * @rdy_delay_ns: Optional delay after waiting for the RB pin
462 * @data_delay_ns: Optional delay after the data xfer
463 * @data_instr_idx: Index of the data instruction in the subop
464 * @data_instr: Pointer to the data instruction in the subop
struct marvell_nfc_op {
	u32 ndcb[4];
468 unsigned int cle_ale_delay_ns;
469 unsigned int rdy_timeout_ms;
470 unsigned int rdy_delay_ns;
471 unsigned int data_delay_ns;
472 unsigned int data_instr_idx;
473 const struct nand_op_instr *data_instr;
 * Internal helper to conditionally apply a delay (from the above structure,
 * most of the time).
static void cond_delay(unsigned int ns)
{
	if (!ns)
		return;

	if (ns < 10000)
		ndelay(ns);
	else
		udelay(DIV_ROUND_UP(ns, 1000));
}
 * The controller has many flags that could generate interrupts. Most of them
 * are disabled and polling is used. For the very slow signals, using
 * interrupts may reduce the CPU load.
496 static void marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
500 /* Writing 1 disables the interrupt */
501 reg = readl_relaxed(nfc->regs + NDCR);
502 writel_relaxed(reg | int_mask, nfc->regs + NDCR);
505 static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
509 /* Writing 0 enables the interrupt */
510 reg = readl_relaxed(nfc->regs + NDCR);
511 writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
514 static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
516 writel_relaxed(int_mask, nfc->regs + NDSR);
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
					  bool force_8bit)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	u32 ndcr;

	/*
	 * Callers of this function do not verify if the NAND is using a 16-bit
	 * or an 8-bit bus for normal operations, so we need to take care of
	 * that here by leaving the configuration unchanged if the NAND does
	 * not have the NAND_BUSWIDTH_16 flag set.
	 */
	if (!(chip->options & NAND_BUSWIDTH_16))
		return;

	ndcr = readl_relaxed(nfc->regs + NDCR);
	if (force_8bit)
		ndcr &= ~(NDCR_DWIDTH_M | NDCR_DWIDTH_C);
	else
		ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	writel_relaxed(ndcr, nfc->regs + NDCR);
}
544 static int marvell_nfc_wait_ndrun(struct nand_chip *chip)
546 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
551 * The command is being processed, wait for the ND_RUN bit to be
552 * cleared by the NFC. If not, we must clear it by hand.
554 ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
555 (val & NDCR_ND_RUN) == 0,
556 POLL_PERIOD, POLL_TIMEOUT);
558 dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
 * Any time a command has to be sent to the controller, the following sequence
 * has to be followed:
 * - call marvell_nfc_prepare_cmd()
 *      -> activate the ND_RUN bit that will kind of 'start a job'
 *      -> wait for the signal indicating the NFC is waiting for a command
 * - send the command (cmd and address cycles)
 * - possibly send or receive the data
 * - call marvell_nfc_end_cmd() with the corresponding flag
 *      -> wait for the flag to be triggered or cancel the job with a timeout
 *
 * The following helpers factorize the code a bit so that the specialized
 * functions responsible for executing the actual NAND operations do not have
 * to replicate the same code blocks.
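 *
 * Condensed example of this sequence, as used by the Hamming read path below:
 *
 *	ret = marvell_nfc_prepare_cmd(chip);
 *	if (ret)
 *		return ret;
 *	marvell_nfc_send_cmd(chip, &nfc_op);
 *	ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
 *				  "RDDREQ while draining FIFO (data/oob)");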
582 static int marvell_nfc_prepare_cmd(struct nand_chip *chip)
584 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
588 /* Poll ND_RUN and clear NDSR before issuing any command */
589 ret = marvell_nfc_wait_ndrun(chip);
591 dev_err(nfc->dev, "Last operation did not succeed\n");
595 ndcr = readl_relaxed(nfc->regs + NDCR);
596 writel_relaxed(readl(nfc->regs + NDSR), nfc->regs + NDSR);
598 /* Assert ND_RUN bit and wait the NFC to be ready */
599 writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
600 ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
602 POLL_PERIOD, POLL_TIMEOUT);
		dev_err(nfc->dev, "Timeout on WRCMDREQ\n");
608 /* Command may be written, clear WRCMDREQ status bit */
609 writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
614 static void marvell_nfc_send_cmd(struct nand_chip *chip,
615 struct marvell_nfc_op *nfc_op)
617 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
618 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
620 dev_dbg(nfc->dev, "\nNDCR: 0x%08x\n"
621 "NDCB0: 0x%08x\nNDCB1: 0x%08x\nNDCB2: 0x%08x\nNDCB3: 0x%08x\n",
622 (u32)readl_relaxed(nfc->regs + NDCR), nfc_op->ndcb[0],
623 nfc_op->ndcb[1], nfc_op->ndcb[2], nfc_op->ndcb[3]);
	writel_relaxed(to_nand_sel(marvell_nand)->ndcb0_csel | nfc_op->ndcb[0],
		       nfc->regs + NDCB0);
627 writel_relaxed(nfc_op->ndcb[1], nfc->regs + NDCB0);
628 writel(nfc_op->ndcb[2], nfc->regs + NDCB0);
631 * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7
632 * fields are used (only available on NFCv2).
634 if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
635 NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) {
636 if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
637 writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
641 static int marvell_nfc_end_cmd(struct nand_chip *chip, int flag,
644 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
648 ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
650 POLL_PERIOD, POLL_TIMEOUT);
653 dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
656 dmaengine_terminate_all(nfc->dma_chan);
661 * DMA function uses this helper to poll on CMDD bits without wanting
662 * them to be cleared.
664 if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN))
667 writel_relaxed(flag, nfc->regs + NDSR);
672 static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
674 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
675 int cs_flag = NDSR_CMDD(to_nand_sel(marvell_nand)->ndcb0_csel);
677 return marvell_nfc_end_cmd(chip, cs_flag, "CMDD");
680 static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
682 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
685 /* Timeout is expressed in ms */
	if (!timeout_ms)
		timeout_ms = IRQ_TIMEOUT;
689 init_completion(&nfc->complete);
691 marvell_nfc_enable_int(nfc, NDCR_RDYM);
692 ret = wait_for_completion_timeout(&nfc->complete,
693 msecs_to_jiffies(timeout_ms));
694 marvell_nfc_disable_int(nfc, NDCR_RDYM);
695 marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
	if (!ret) {
		dev_err(nfc->dev, "Timeout waiting for RB signal\n");
		return -ETIMEDOUT;
	}
704 static void marvell_nfc_select_chip(struct mtd_info *mtd, int die_nr)
706 struct nand_chip *chip = mtd_to_nand(mtd);
707 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
708 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
		return;
714 if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
715 nfc->selected_chip = NULL;
		marvell_nand->selected_die = -1;
		return;
	}
720 writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
721 writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
724 * Reset the NDCR register to a clean state for this particular chip,
725 * also clear ND_RUN bit.
727 ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
728 NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
729 writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
731 /* Also reset the interrupt status register */
732 marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
734 nfc->selected_chip = chip;
735 marvell_nand->selected_die = die_nr;
738 static irqreturn_t marvell_nfc_isr(int irq, void *dev_id)
740 struct marvell_nfc *nfc = dev_id;
741 u32 st = readl_relaxed(nfc->regs + NDSR);
742 u32 ien = (~readl_relaxed(nfc->regs + NDCR)) & NDCR_ALL_INT;
	 * RDY interrupt mask is one bit in NDCR while there are two status
	 * bits in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
	if (st & NDSR_RDY(1))
		st |= NDSR_RDY(0);

	if (!(st & ien))
		return IRQ_NONE;

754 marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
756 if (!(st & (NDSR_RDDREQ | NDSR_WRDREQ | NDSR_WRCMDREQ)))
757 complete(&nfc->complete);
762 /* HW ECC related functions */
763 static void marvell_nfc_enable_hw_ecc(struct nand_chip *chip)
765 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
766 u32 ndcr = readl_relaxed(nfc->regs + NDCR);
768 if (!(ndcr & NDCR_ECC_EN)) {
769 writel_relaxed(ndcr | NDCR_ECC_EN, nfc->regs + NDCR);
772 * When enabling BCH, set threshold to 0 to always know the
773 * number of corrected bitflips.
775 if (chip->ecc.algo == NAND_ECC_BCH)
776 writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
780 static void marvell_nfc_disable_hw_ecc(struct nand_chip *chip)
782 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
783 u32 ndcr = readl_relaxed(nfc->regs + NDCR);
785 if (ndcr & NDCR_ECC_EN) {
786 writel_relaxed(ndcr & ~NDCR_ECC_EN, nfc->regs + NDCR);
787 if (chip->ecc.algo == NAND_ECC_BCH)
788 writel_relaxed(0, nfc->regs + NDECCCTRL);
792 /* DMA related helpers */
793 static void marvell_nfc_enable_dma(struct marvell_nfc *nfc)
797 reg = readl_relaxed(nfc->regs + NDCR);
798 writel_relaxed(reg | NDCR_DMA_EN, nfc->regs + NDCR);
801 static void marvell_nfc_disable_dma(struct marvell_nfc *nfc)
805 reg = readl_relaxed(nfc->regs + NDCR);
806 writel_relaxed(reg & ~NDCR_DMA_EN, nfc->regs + NDCR);
809 /* Read/write PIO/DMA accessors */
810 static int marvell_nfc_xfer_data_dma(struct marvell_nfc *nfc,
811 enum dma_data_direction direction,
814 unsigned int dma_len = min_t(int, ALIGN(len, 32), MAX_CHUNK_SIZE);
815 struct dma_async_tx_descriptor *tx;
816 struct scatterlist sg;
820 marvell_nfc_enable_dma(nfc);
821 /* Prepare the DMA transfer */
822 sg_init_one(&sg, nfc->dma_buf, dma_len);
823 dma_map_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
824 tx = dmaengine_prep_slave_sg(nfc->dma_chan, &sg, 1,
825 direction == DMA_FROM_DEVICE ?
826 DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
829 dev_err(nfc->dev, "Could not prepare DMA S/G list\n");
833 /* Do the task and wait for it to finish */
834 cookie = dmaengine_submit(tx);
835 ret = dma_submit_error(cookie);
839 dma_async_issue_pending(nfc->dma_chan);
840 ret = marvell_nfc_wait_cmdd(nfc->selected_chip);
841 dma_unmap_sg(nfc->dma_chan->device->dev, &sg, 1, direction);
842 marvell_nfc_disable_dma(nfc);
844 dev_err(nfc->dev, "Timeout waiting for DMA (status: %d)\n",
845 dmaengine_tx_status(nfc->dma_chan, cookie, NULL));
846 dmaengine_terminate_all(nfc->dma_chan);
853 static int marvell_nfc_xfer_data_in_pio(struct marvell_nfc *nfc, u8 *in,
856 unsigned int last_len = len % FIFO_DEPTH;
857 unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
860 for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
861 ioread32_rep(nfc->regs + NDDB, in + i, FIFO_REP(FIFO_DEPTH));
864 u8 tmp_buf[FIFO_DEPTH];
866 ioread32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
867 memcpy(in + last_full_offset, tmp_buf, last_len);
873 static int marvell_nfc_xfer_data_out_pio(struct marvell_nfc *nfc, const u8 *out,
876 unsigned int last_len = len % FIFO_DEPTH;
877 unsigned int last_full_offset = round_down(len, FIFO_DEPTH);
880 for (i = 0; i < last_full_offset; i += FIFO_DEPTH)
881 iowrite32_rep(nfc->regs + NDDB, out + i, FIFO_REP(FIFO_DEPTH));
884 u8 tmp_buf[FIFO_DEPTH];
886 memcpy(tmp_buf, out + last_full_offset, last_len);
887 iowrite32_rep(nfc->regs + NDDB, tmp_buf, FIFO_REP(FIFO_DEPTH));
893 static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
894 u8 *data, int data_len,
895 u8 *spare, int spare_len,
896 u8 *ecc, int ecc_len,
897 unsigned int *max_bitflips)
899 struct mtd_info *mtd = nand_to_mtd(chip);
903 * Blank pages (all 0xFF) that have not been written may be recognized
904 * as bad if bitflips occur, so whenever an uncorrectable error occurs,
905 * check if the entire page (with ECC bytes) is actually blank or not.
914 bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
915 spare, spare_len, chip->ecc.strength);
917 mtd->ecc_stats.failed++;
921 /* Update the stats and max_bitflips */
922 mtd->ecc_stats.corrected += bf;
923 *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
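
/*
 * Typical use, mirroring the ECC read paths below: when the ECC engine
 * reports an uncorrectable error, the chunk is re-read in raw mode and
 * handed to marvell_nfc_check_empty_chunk() above, e.g.:
 *
 *	marvell_nfc_check_empty_chunk(chip, data, data_len, spare, spare_len,
 *				      ecc, ecc_len, &max_bitflips);
 */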
 * Check if a chunk is correct or not according to the hardware ECC engine.
 * mtd->ecc_stats.corrected is updated, as well as max_bitflips. However,
 * mtd->ecc_stats.failed is not: the function instead returns a non-zero
 * value indicating that the emptiness of the subpage must be checked
 * before declaring the subpage corrupted.
933 static int marvell_nfc_hw_ecc_correct(struct nand_chip *chip,
934 unsigned int *max_bitflips)
936 struct mtd_info *mtd = nand_to_mtd(chip);
937 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
941 ndsr = readl_relaxed(nfc->regs + NDSR);
943 /* Check uncorrectable error flag */
944 if (ndsr & NDSR_UNCERR) {
945 writel_relaxed(ndsr, nfc->regs + NDSR);
		 * Do not increment ->ecc_stats.failed now; instead, return a
		 * non-zero value to indicate that this chunk was apparently
		 * bad, and it should be checked to see if it is empty or not.
		 * If the chunk (with ECC bytes) is not declared empty, the
		 * calling function must increment the failure count.
957 /* Check correctable error flag */
958 if (ndsr & NDSR_CORERR) {
959 writel_relaxed(ndsr, nfc->regs + NDSR);
961 if (chip->ecc.algo == NAND_ECC_BCH)
962 bf = NDSR_ERRCNT(ndsr);
967 /* Update the stats and max_bitflips */
968 mtd->ecc_stats.corrected += bf;
969 *max_bitflips = max_t(unsigned int, *max_bitflips, bf);
974 /* Hamming read helpers */
975 static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
976 u8 *data_buf, u8 *oob_buf,
979 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
980 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
981 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
982 struct marvell_nfc_op nfc_op = {
983 .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_DBC |
986 NDCB0_CMD1(NAND_CMD_READ0) |
987 NDCB0_CMD2(NAND_CMD_READSTART),
988 .ndcb[1] = NDCB1_ADDRS_PAGE(page),
989 .ndcb[2] = NDCB2_ADDR5_PAGE(page),
991 unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
994 /* NFCv2 needs more information about the operation being executed */
995 if (nfc->caps->is_nfcv2)
996 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
998 ret = marvell_nfc_prepare_cmd(chip);
1002 marvell_nfc_send_cmd(chip, &nfc_op);
1003 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1004 "RDDREQ while draining FIFO (data/oob)");
	 * Read the page then the OOB area. Unlike what is shown in current
	 * documentation, spare bytes are protected by the ECC engine, and must
	 * be at the beginning of the OOB area; otherwise, running this driver
	 * on legacy systems would prevent the discovery of the BBM/BBT.
1015 marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
1016 lt->data_bytes + oob_bytes);
1017 memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
1018 memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
1020 marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
1021 marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
1024 ret = marvell_nfc_wait_cmdd(chip);
1029 static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct mtd_info *mtd,
1030 struct nand_chip *chip, u8 *buf,
1031 int oob_required, int page)
1033 return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
1037 static int marvell_nfc_hw_ecc_hmg_read_page(struct mtd_info *mtd,
1038 struct nand_chip *chip,
1039 u8 *buf, int oob_required,
1042 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1043 unsigned int full_sz = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
1044 int max_bitflips = 0, ret;
1047 marvell_nfc_enable_hw_ecc(chip);
	marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
					    page);
1050 ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
1051 marvell_nfc_disable_hw_ecc(chip);
	if (!ret)
		return max_bitflips;
1057 * When ECC failures are detected, check if the full page has been
1058 * written or not. Ignore the failure if it is actually empty.
1060 raw_buf = kmalloc(full_sz, GFP_KERNEL);
1064 marvell_nfc_hw_ecc_hmg_do_read_page(chip, raw_buf, raw_buf +
1065 lt->data_bytes, true, page);
	marvell_nfc_check_empty_chunk(chip, raw_buf, full_sz, NULL, 0, NULL, 0,
				      &max_bitflips);
	kfree(raw_buf);
1070 return max_bitflips;
 * Since the spare area in Hamming layouts is not protected by the ECC engine
 * (even if it appears before the ECC bytes when reading), the ->read_oob_raw()
 * function also stands for ->read_oob().
1078 static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct mtd_info *mtd,
1079 struct nand_chip *chip, int page)
	/* Invalidate page cache */
	chip->pagebuf = -1;
1084 return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
1085 chip->oob_poi, true, page);
1088 /* Hamming write helpers */
1089 static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
1091 const u8 *oob_buf, bool raw,
1094 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
1095 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1096 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1097 struct marvell_nfc_op nfc_op = {
1098 .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) |
1099 NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
1100 NDCB0_CMD1(NAND_CMD_SEQIN) |
			   NDCB0_CMD2(NAND_CMD_PAGEPROG) |
			   NDCB0_DBC,
1103 .ndcb[1] = NDCB1_ADDRS_PAGE(page),
1104 .ndcb[2] = NDCB2_ADDR5_PAGE(page),
1106 unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
1109 /* NFCv2 needs more information about the operation being executed */
1110 if (nfc->caps->is_nfcv2)
1111 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
1113 ret = marvell_nfc_prepare_cmd(chip);
1117 marvell_nfc_send_cmd(chip, &nfc_op);
1118 ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
1119 "WRDREQ while loading FIFO (data)");
1123 /* Write the page then the OOB area */
1125 memcpy(nfc->dma_buf, data_buf, lt->data_bytes);
1126 memcpy(nfc->dma_buf + lt->data_bytes, oob_buf, oob_bytes);
1127 marvell_nfc_xfer_data_dma(nfc, DMA_TO_DEVICE, lt->data_bytes +
1128 lt->ecc_bytes + lt->spare_bytes);
1130 marvell_nfc_xfer_data_out_pio(nfc, data_buf, lt->data_bytes);
1131 marvell_nfc_xfer_data_out_pio(nfc, oob_buf, oob_bytes);
1134 ret = marvell_nfc_wait_cmdd(chip);
1138 ret = marvell_nfc_wait_op(chip,
1139 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
1143 static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct mtd_info *mtd,
1144 struct nand_chip *chip,
1146 int oob_required, int page)
1148 return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
1152 static int marvell_nfc_hw_ecc_hmg_write_page(struct mtd_info *mtd,
1153 struct nand_chip *chip,
1155 int oob_required, int page)
1159 marvell_nfc_enable_hw_ecc(chip);
1160 ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
1162 marvell_nfc_disable_hw_ecc(chip);
 * Since the spare area in Hamming layouts is not protected by the ECC engine
 * (even if it appears before the ECC bytes when reading), the
 * ->write_oob_raw() function also stands for ->write_oob().
1172 static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct mtd_info *mtd,
1173 struct nand_chip *chip,
	/* Invalidate page cache */
	chip->pagebuf = -1;
1179 memset(chip->data_buf, 0xFF, mtd->writesize);
1181 return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
1182 chip->oob_poi, true, page);
1185 /* BCH read helpers */
1186 static int marvell_nfc_hw_ecc_bch_read_page_raw(struct mtd_info *mtd,
1187 struct nand_chip *chip, u8 *buf,
1188 int oob_required, int page)
1190 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1191 u8 *oob = chip->oob_poi;
1192 int chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
1193 int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
1194 lt->last_spare_bytes;
1195 int data_len = lt->data_bytes;
1196 int spare_len = lt->spare_bytes;
1197 int ecc_len = lt->ecc_bytes;
1201 memset(chip->oob_poi, 0xFF, mtd->oobsize);
1203 nand_read_page_op(chip, page, 0, NULL, 0);
1205 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1206 /* Update last chunk length */
1207 if (chunk >= lt->full_chunk_cnt) {
1208 data_len = lt->last_data_bytes;
1209 spare_len = lt->last_spare_bytes;
1210 ecc_len = lt->last_ecc_bytes;
		/* Read data bytes */
1214 nand_change_read_column_op(chip, chunk * chunk_size,
					   buf + (lt->data_bytes * chunk),
					   data_len, false);
1218 /* Read spare bytes */
		nand_read_data_op(chip, oob + (lt->spare_bytes * chunk),
				  spare_len, false);
1222 /* Read ECC bytes */
1223 nand_read_data_op(chip, oob + ecc_offset +
				  (ALIGN(lt->ecc_bytes, 32) * chunk),
				  ecc_len, false);
1231 static void marvell_nfc_hw_ecc_bch_read_chunk(struct nand_chip *chip, int chunk,
1232 u8 *data, unsigned int data_len,
1233 u8 *spare, unsigned int spare_len,
1236 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
1237 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1238 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1240 struct marvell_nfc_op nfc_op = {
1241 .ndcb[0] = NDCB0_CMD_TYPE(TYPE_READ) |
			   NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
			   NDCB0_LEN_OVRD,
1244 .ndcb[1] = NDCB1_ADDRS_PAGE(page),
1245 .ndcb[2] = NDCB2_ADDR5_PAGE(page),
1246 .ndcb[3] = data_len + spare_len,
1249 ret = marvell_nfc_prepare_cmd(chip);
1254 nfc_op.ndcb[0] |= NDCB0_DBC |
1255 NDCB0_CMD1(NAND_CMD_READ0) |
1256 NDCB0_CMD2(NAND_CMD_READSTART);
1259 * Trigger the monolithic read on the first chunk, then naked read on
1260 * intermediate chunks and finally a last naked read on the last chunk.
1263 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
1264 else if (chunk < lt->nchunks - 1)
1265 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
1267 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1269 marvell_nfc_send_cmd(chip, &nfc_op);
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read, we have
		 * to make sure that the NDSR.RDDREQ bit is set.
1276 * Drain the FIFO, 8 32-bit reads at a time, and skip
1277 * the polling on the last read.
1279 * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
1281 for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
1282 marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1283 "RDDREQ while draining FIFO (data)");
1284 marvell_nfc_xfer_data_in_pio(nfc, data,
1285 FIFO_DEPTH * BCH_SEQ_READS);
1286 data += FIFO_DEPTH * BCH_SEQ_READS;
1289 for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
1290 marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1291 "RDDREQ while draining FIFO (OOB)");
1292 marvell_nfc_xfer_data_in_pio(nfc, spare,
1293 FIFO_DEPTH * BCH_SEQ_READS);
1294 spare += FIFO_DEPTH * BCH_SEQ_READS;
1298 static int marvell_nfc_hw_ecc_bch_read_page(struct mtd_info *mtd,
1299 struct nand_chip *chip,
1300 u8 *buf, int oob_required,
1303 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1304 int data_len = lt->data_bytes, spare_len = lt->spare_bytes, ecc_len;
1305 u8 *data = buf, *spare = chip->oob_poi, *ecc;
1306 int max_bitflips = 0;
1307 u32 failure_mask = 0;
1308 int chunk, ecc_offset_in_page, ret;
	 * With BCH, OOB is not fully used (and thus not read entirely):
	 * unexpected bytes could show up at the end of the OOB buffer if
	 * it is not explicitly erased.
1316 memset(chip->oob_poi, 0xFF, mtd->oobsize);
1318 marvell_nfc_enable_hw_ecc(chip);
1320 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1321 /* Update length for the last chunk */
1322 if (chunk >= lt->full_chunk_cnt) {
1323 data_len = lt->last_data_bytes;
1324 spare_len = lt->last_spare_bytes;
1327 /* Read the chunk and detect number of bitflips */
1328 marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
1329 spare, spare_len, page);
1330 ret = marvell_nfc_hw_ecc_correct(chip, &max_bitflips);
1332 failure_mask |= BIT(chunk);
1338 marvell_nfc_disable_hw_ecc(chip);
1341 return max_bitflips;
	 * Please note that dumping the ECC bytes during a normal read with OOB
	 * area would add a significant overhead as ECC bytes are "consumed" by
	 * the controller in normal mode and must be re-read in raw mode. To
	 * avoid degrading performance, we prefer not to include them. The
	 * user should re-read the page in raw mode if ECC bytes are required.
	 *
	 * However, for any subpage read error reported by ->correct(), the ECC
	 * bytes must be read in raw mode and the full subpage must be checked
	 * to see if it is entirely empty or if there was an actual error.
1354 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1355 /* No failure reported for this chunk, move to the next one */
1356 if (!(failure_mask & BIT(chunk)))
1359 /* Derive ECC bytes positions (in page/buffer) and length */
1360 ecc = chip->oob_poi +
1361 (lt->full_chunk_cnt * lt->spare_bytes) +
1362 lt->last_spare_bytes +
1363 (chunk * ALIGN(lt->ecc_bytes, 32));
1364 ecc_offset_in_page =
1365 (chunk * (lt->data_bytes + lt->spare_bytes +
1367 (chunk < lt->full_chunk_cnt ?
1368 lt->data_bytes + lt->spare_bytes :
1369 lt->last_data_bytes + lt->last_spare_bytes);
1370 ecc_len = chunk < lt->full_chunk_cnt ?
1371 lt->ecc_bytes : lt->last_ecc_bytes;
1373 /* Do the actual raw read of the ECC bytes */
1374 nand_change_read_column_op(chip, ecc_offset_in_page,
1375 ecc, ecc_len, false);
1377 /* Derive data/spare bytes positions (in buffer) and length */
1378 data = buf + (chunk * lt->data_bytes);
1379 data_len = chunk < lt->full_chunk_cnt ?
1380 lt->data_bytes : lt->last_data_bytes;
1381 spare = chip->oob_poi + (chunk * (lt->spare_bytes +
1383 spare_len = chunk < lt->full_chunk_cnt ?
1384 lt->spare_bytes : lt->last_spare_bytes;
		/* Check the entire chunk (data + spare + ecc) for emptiness */
1387 marvell_nfc_check_empty_chunk(chip, data, data_len, spare,
1388 spare_len, ecc, ecc_len,
1392 return max_bitflips;
1395 static int marvell_nfc_hw_ecc_bch_read_oob_raw(struct mtd_info *mtd,
1396 struct nand_chip *chip, int page)
	/* Invalidate page cache */
	chip->pagebuf = -1;
1401 return chip->ecc.read_page_raw(mtd, chip, chip->data_buf, true, page);
1404 static int marvell_nfc_hw_ecc_bch_read_oob(struct mtd_info *mtd,
1405 struct nand_chip *chip, int page)
	/* Invalidate page cache */
	chip->pagebuf = -1;
1410 return chip->ecc.read_page(mtd, chip, chip->data_buf, true, page);
1413 /* BCH write helpers */
1414 static int marvell_nfc_hw_ecc_bch_write_page_raw(struct mtd_info *mtd,
1415 struct nand_chip *chip,
1417 int oob_required, int page)
1419 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1420 int full_chunk_size = lt->data_bytes + lt->spare_bytes + lt->ecc_bytes;
1421 int data_len = lt->data_bytes;
1422 int spare_len = lt->spare_bytes;
1423 int ecc_len = lt->ecc_bytes;
1424 int spare_offset = 0;
1425 int ecc_offset = (lt->full_chunk_cnt * lt->spare_bytes) +
1426 lt->last_spare_bytes;
1429 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1431 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1432 if (chunk >= lt->full_chunk_cnt) {
1433 data_len = lt->last_data_bytes;
1434 spare_len = lt->last_spare_bytes;
1435 ecc_len = lt->last_ecc_bytes;
1438 /* Point to the column of the next chunk */
1439 nand_change_write_column_op(chip, chunk * full_chunk_size,
1442 /* Write the data */
1443 nand_write_data_op(chip, buf + (chunk * lt->data_bytes),
1449 /* Write the spare bytes */
1451 nand_write_data_op(chip, chip->oob_poi + spare_offset,
1454 /* Write the ECC bytes */
1456 nand_write_data_op(chip, chip->oob_poi + ecc_offset,
1459 spare_offset += spare_len;
1460 ecc_offset += ALIGN(ecc_len, 32);
1463 return nand_prog_page_end_op(chip);
1467 marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
1468 const u8 *data, unsigned int data_len,
1469 const u8 *spare, unsigned int spare_len,
1472 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
1473 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1474 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1477 struct marvell_nfc_op nfc_op = {
1478 .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
1479 .ndcb[3] = data_len + spare_len,
1483 * First operation dispatches the CMD_SEQIN command, issue the address
1484 * cycles and asks for the first chunk of data.
1485 * All operations in the middle (if any) will issue a naked write and
1486 * also ask for data.
1487 * Last operation (if any) asks for the last chunk of data through a
1491 if (lt->nchunks == 1)
1492 xtype = XTYPE_MONOLITHIC_RW;
1494 xtype = XTYPE_WRITE_DISPATCH;
1496 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
1497 NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
1498 NDCB0_CMD1(NAND_CMD_SEQIN);
1499 nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
1500 nfc_op.ndcb[2] |= NDCB2_ADDR5_PAGE(page);
1501 } else if (chunk < lt->nchunks - 1) {
1502 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW);
1504 nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1507 /* Always dispatch the PAGEPROG command on the last chunk */
1508 if (chunk == lt->nchunks - 1)
1509 nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
1511 ret = marvell_nfc_prepare_cmd(chip);
1515 marvell_nfc_send_cmd(chip, &nfc_op);
1516 ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ,
1517 "WRDREQ while loading FIFO (data)");
1521 /* Transfer the contents */
1522 iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
1523 iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
1528 static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
1529 struct nand_chip *chip,
1531 int oob_required, int page)
1533 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
1534 const u8 *data = buf;
1535 const u8 *spare = chip->oob_poi;
1536 int data_len = lt->data_bytes;
1537 int spare_len = lt->spare_bytes;
1540 /* Spare data will be written anyway, so clear it to avoid garbage */
1542 memset(chip->oob_poi, 0xFF, mtd->oobsize);
1544 marvell_nfc_enable_hw_ecc(chip);
1546 for (chunk = 0; chunk < lt->nchunks; chunk++) {
1547 if (chunk >= lt->full_chunk_cnt) {
1548 data_len = lt->last_data_bytes;
1549 spare_len = lt->last_spare_bytes;
1552 marvell_nfc_hw_ecc_bch_write_chunk(chip, chunk, data, data_len,
1553 spare, spare_len, page);
		 * Waiting only for CMDD or PAGED is not enough, the ECC bytes
		 * would only be partially written. No flag is set once the
		 * operation is really finished, but the ND_RUN bit is cleared,
		 * so wait for it before stepping into the next command.
1563 marvell_nfc_wait_ndrun(chip);
1566 ret = marvell_nfc_wait_op(chip,
1567 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
1569 marvell_nfc_disable_hw_ecc(chip);
1577 static int marvell_nfc_hw_ecc_bch_write_oob_raw(struct mtd_info *mtd,
1578 struct nand_chip *chip,
	/* Invalidate page cache */
	chip->pagebuf = -1;
1584 memset(chip->data_buf, 0xFF, mtd->writesize);
1586 return chip->ecc.write_page_raw(mtd, chip, chip->data_buf, true, page);
1589 static int marvell_nfc_hw_ecc_bch_write_oob(struct mtd_info *mtd,
1590 struct nand_chip *chip, int page)
	/* Invalidate page cache */
	chip->pagebuf = -1;
1595 memset(chip->data_buf, 0xFF, mtd->writesize);
1597 return chip->ecc.write_page(mtd, chip, chip->data_buf, true, page);
1600 /* NAND framework ->exec_op() hooks and related helpers */
1601 static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1602 const struct nand_subop *subop,
1603 struct marvell_nfc_op *nfc_op)
1605 const struct nand_op_instr *instr = NULL;
1606 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1607 bool first_cmd = true;
1611 /* Reset the input structure as most of its fields will be OR'ed */
1612 memset(nfc_op, 0, sizeof(struct marvell_nfc_op));
1614 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
1615 unsigned int offset, naddrs;
1617 int len = nand_subop_get_data_len(subop, op_id);
1619 instr = &subop->instrs[op_id];
1621 switch (instr->type) {
1622 case NAND_OP_CMD_INSTR:
			if (first_cmd)
				nfc_op->ndcb[0] |=
					NDCB0_CMD1(instr->ctx.cmd.opcode);
			else
				nfc_op->ndcb[0] |=
					NDCB0_CMD2(instr->ctx.cmd.opcode) |
					NDCB0_DBC;

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			first_cmd = false;
			break;
1635 case NAND_OP_ADDR_INSTR:
1636 offset = nand_subop_get_addr_start_off(subop, op_id);
1637 naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
1638 addrs = &instr->ctx.addr.addrs[offset];
1640 nfc_op->ndcb[0] |= NDCB0_ADDR_CYC(naddrs);
1642 for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
1643 nfc_op->ndcb[1] |= addrs[i] << (8 * i);
			if (naddrs >= 5)
				nfc_op->ndcb[2] |= NDCB2_ADDR5_CYC(addrs[4]);
			if (naddrs >= 6)
				nfc_op->ndcb[3] |= NDCB3_ADDR6_CYC(addrs[5]);
			if (naddrs == 7)
				nfc_op->ndcb[3] |= NDCB3_ADDR7_CYC(addrs[6]);

			nfc_op->cle_ale_delay_ns = instr->delay_ns;
			break;
1655 case NAND_OP_DATA_IN_INSTR:
1656 nfc_op->data_instr = instr;
1657 nfc_op->data_instr_idx = op_id;
1658 nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ);
1659 if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;
1668 case NAND_OP_DATA_OUT_INSTR:
1669 nfc_op->data_instr = instr;
1670 nfc_op->data_instr_idx = op_id;
1671 nfc_op->ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE);
1672 if (nfc->caps->is_nfcv2) {
				nfc_op->ndcb[0] |=
					NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
					NDCB0_LEN_OVRD;
				nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
			}
			nfc_op->data_delay_ns = instr->delay_ns;
			break;
1681 case NAND_OP_WAITRDY_INSTR:
1682 nfc_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
1683 nfc_op->rdy_delay_ns = instr->delay_ns;
1689 static int marvell_nfc_xfer_data_pio(struct nand_chip *chip,
1690 const struct nand_subop *subop,
1691 struct marvell_nfc_op *nfc_op)
1693 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
1694 const struct nand_op_instr *instr = nfc_op->data_instr;
1695 unsigned int op_id = nfc_op->data_instr_idx;
1696 unsigned int len = nand_subop_get_data_len(subop, op_id);
1697 unsigned int offset = nand_subop_get_data_start_off(subop, op_id);
1698 bool reading = (instr->type == NAND_OP_DATA_IN_INSTR);
1701 if (instr->ctx.data.force_8bit)
1702 marvell_nfc_force_byte_access(chip, true);
	if (reading) {
		u8 *in = instr->ctx.data.buf.in + offset;
1707 ret = marvell_nfc_xfer_data_in_pio(nfc, in, len);
	} else {
		const u8 *out = instr->ctx.data.buf.out + offset;

		ret = marvell_nfc_xfer_data_out_pio(nfc, out, len);
	}
1714 if (instr->ctx.data.force_8bit)
1715 marvell_nfc_force_byte_access(chip, false);
1720 static int marvell_nfc_monolithic_access_exec(struct nand_chip *chip,
1721 const struct nand_subop *subop)
1723 struct marvell_nfc_op nfc_op;
1727 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1728 reading = (nfc_op.data_instr->type == NAND_OP_DATA_IN_INSTR);
1730 ret = marvell_nfc_prepare_cmd(chip);
1734 marvell_nfc_send_cmd(chip, &nfc_op);
1735 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
1736 "RDDREQ/WRDREQ while draining raw data");
1740 cond_delay(nfc_op.cle_ale_delay_ns);
1743 if (nfc_op.rdy_timeout_ms) {
1744 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1749 cond_delay(nfc_op.rdy_delay_ns);
1752 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1753 ret = marvell_nfc_wait_cmdd(chip);
1757 cond_delay(nfc_op.data_delay_ns);
1760 if (nfc_op.rdy_timeout_ms) {
1761 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1766 cond_delay(nfc_op.rdy_delay_ns);
1770 * NDCR ND_RUN bit should be cleared automatically at the end of each
1771 * operation but experience shows that the behavior is buggy when it
1772 * comes to writes (with LEN_OVRD). Clear it by hand in this case.
1775 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
1784 static int marvell_nfc_naked_access_exec(struct nand_chip *chip,
1785 const struct nand_subop *subop)
1787 struct marvell_nfc_op nfc_op;
1790 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
	 * Naked accesses are different in that they need to be flagged as
	 * naked by the controller. Reset the controller register fields that
	 * indicate the type and refill them according to the ongoing
	 * operation.
1797 nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
1798 NDCB0_CMD_XTYPE(XTYPE_MASK));
1799 switch (subop->instrs[0].type) {
1800 case NAND_OP_CMD_INSTR:
1801 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD);
1803 case NAND_OP_ADDR_INSTR:
1804 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR);
1806 case NAND_OP_DATA_IN_INSTR:
1807 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
1808 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1810 case NAND_OP_DATA_OUT_INSTR:
1811 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
1812 NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
1815 /* This should never happen */
1819 ret = marvell_nfc_prepare_cmd(chip);
1823 marvell_nfc_send_cmd(chip, &nfc_op);
1825 if (!nfc_op.data_instr) {
1826 ret = marvell_nfc_wait_cmdd(chip);
1827 cond_delay(nfc_op.cle_ale_delay_ns);
1831 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ,
1832 "RDDREQ/WRDREQ while draining raw data");
1836 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1837 ret = marvell_nfc_wait_cmdd(chip);
1842 * NDCR ND_RUN bit should be cleared automatically at the end of each
1843 * operation but experience shows that the behavior is buggy when it
1844 * comes to writes (with LEN_OVRD). Clear it by hand in this case.
1846 if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) {
1847 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
		writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
			       nfc->regs + NDCR);
1856 static int marvell_nfc_naked_waitrdy_exec(struct nand_chip *chip,
1857 const struct nand_subop *subop)
1859 struct marvell_nfc_op nfc_op;
1862 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1864 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1865 cond_delay(nfc_op.rdy_delay_ns);
1870 static int marvell_nfc_read_id_type_exec(struct nand_chip *chip,
1871 const struct nand_subop *subop)
1873 struct marvell_nfc_op nfc_op;
1876 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1877 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
1878 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ_ID);
1880 ret = marvell_nfc_prepare_cmd(chip);
1884 marvell_nfc_send_cmd(chip, &nfc_op);
1885 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1886 "RDDREQ while reading ID");
1890 cond_delay(nfc_op.cle_ale_delay_ns);
1892 if (nfc_op.rdy_timeout_ms) {
1893 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1898 cond_delay(nfc_op.rdy_delay_ns);
1900 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1901 ret = marvell_nfc_wait_cmdd(chip);
1905 cond_delay(nfc_op.data_delay_ns);
1910 static int marvell_nfc_read_status_exec(struct nand_chip *chip,
1911 const struct nand_subop *subop)
1913 struct marvell_nfc_op nfc_op;
1916 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1917 nfc_op.ndcb[0] &= ~NDCB0_CMD_TYPE(TYPE_READ);
1918 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_STATUS);
1920 ret = marvell_nfc_prepare_cmd(chip);
1924 marvell_nfc_send_cmd(chip, &nfc_op);
1925 ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ,
1926 "RDDREQ while reading status");
1930 cond_delay(nfc_op.cle_ale_delay_ns);
1932 if (nfc_op.rdy_timeout_ms) {
1933 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1938 cond_delay(nfc_op.rdy_delay_ns);
1940 marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
1941 ret = marvell_nfc_wait_cmdd(chip);
1945 cond_delay(nfc_op.data_delay_ns);
1950 static int marvell_nfc_reset_cmd_type_exec(struct nand_chip *chip,
1951 const struct nand_subop *subop)
1953 struct marvell_nfc_op nfc_op;
1956 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1957 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_RESET);
1959 ret = marvell_nfc_prepare_cmd(chip);
1963 marvell_nfc_send_cmd(chip, &nfc_op);
1964 ret = marvell_nfc_wait_cmdd(chip);
1968 cond_delay(nfc_op.cle_ale_delay_ns);
1970 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
1974 cond_delay(nfc_op.rdy_delay_ns);
1979 static int marvell_nfc_erase_cmd_type_exec(struct nand_chip *chip,
1980 const struct nand_subop *subop)
1982 struct marvell_nfc_op nfc_op;
1985 marvell_nfc_parse_instructions(chip, subop, &nfc_op);
1986 nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_ERASE);
1988 ret = marvell_nfc_prepare_cmd(chip);
1992 marvell_nfc_send_cmd(chip, &nfc_op);
1993 ret = marvell_nfc_wait_cmdd(chip);
1997 cond_delay(nfc_op.cle_ale_delay_ns);
1999 ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms);
2003 cond_delay(nfc_op.rdy_delay_ns);
2008 static const struct nand_op_parser marvell_nfcv2_op_parser = NAND_OP_PARSER(
2009 /* Monolithic reads/writes */
2010 NAND_OP_PARSER_PATTERN(
2011 marvell_nfc_monolithic_access_exec,
2012 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2013 NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYC_NFCV2),
2014 NAND_OP_PARSER_PAT_CMD_ELEM(true),
2015 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
2016 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
2017 NAND_OP_PARSER_PATTERN(
2018 marvell_nfc_monolithic_access_exec,
2019 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2020 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2),
2021 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE),
2022 NAND_OP_PARSER_PAT_CMD_ELEM(true),
2023 NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)),
2024 /* Naked commands */
2025 NAND_OP_PARSER_PATTERN(
2026 marvell_nfc_naked_access_exec,
2027 NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2028 NAND_OP_PARSER_PATTERN(
2029 marvell_nfc_naked_access_exec,
2030 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV2)),
2031 NAND_OP_PARSER_PATTERN(
2032 marvell_nfc_naked_access_exec,
2033 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_CHUNK_SIZE)),
2034 NAND_OP_PARSER_PATTERN(
2035 marvell_nfc_naked_access_exec,
2036 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_CHUNK_SIZE)),
2037 NAND_OP_PARSER_PATTERN(
2038 marvell_nfc_naked_waitrdy_exec,
2039 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),

static const struct nand_op_parser marvell_nfcv1_op_parser = NAND_OP_PARSER(
	/* Naked commands not supported, use a function for each pattern */
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_id_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_erase_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC_NFCV1),
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_read_status_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_reset_cmd_type_exec,
		NAND_OP_PARSER_PAT_CMD_ELEM(false),
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	NAND_OP_PARSER_PATTERN(
		marvell_nfc_naked_waitrdy_exec,
		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
	);
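
/*
 * ->exec_op() entry point: dispatch the operation through the op parser
 * matching the controller version.
 */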
static int marvell_nfc_exec_op(struct nand_chip *chip,
			       const struct nand_operation *op,
			       bool check_only)
{
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);

	if (nfc->caps->is_nfcv2)
		return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
					      op, check_only);
	else
		return nand_op_parser_exec_op(chip, &marvell_nfcv1_op_parser,
					      op, check_only);
}

/*
 * Layouts were broken in old pxa3xx_nand driver, these are supposed to be
 * usable.
 */
static int marvell_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;

	if (section)
		return -ERANGE;

	oobregion->length = (lt->full_chunk_cnt * lt->ecc_bytes) +
			    lt->last_ecc_bytes;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

static int marvell_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;

	if (section)
		return -ERANGE;

	/*
	 * Bootrom looks in bytes 0 & 5 for bad blocks for the
	 * 4KB page / 4bit BCH combination.
	 */
	if (mtd->writesize == SZ_4K && lt->data_bytes == SZ_2K)
		oobregion->offset = 6;
	else
		oobregion->offset = 2;

	oobregion->length = (lt->full_chunk_cnt * lt->spare_bytes) +
			    lt->last_spare_bytes - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops marvell_nand_ooblayout_ops = {
	.ecc = marvell_nand_ooblayout_ecc,
	.free = marvell_nand_ooblayout_free,
};
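
/*
 * Bind the chip to one of the supported hardware ECC layouts and install the
 * matching page access helpers. The layout is selected by page size, ECC
 * chunk size and ECC strength; NFCv1 is additionally limited to Hamming
 * (strength 1).
 */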
static int marvell_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
					 struct nand_ecc_ctrl *ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	const struct marvell_hw_ecc_layout *l;
	int i;

	if (!nfc->caps->is_nfcv2 &&
	    (mtd->writesize + mtd->oobsize > MAX_CHUNK_SIZE)) {
		dev_err(nfc->dev,
			"NFCv1: writesize (%d) cannot be bigger than a chunk (%d)\n",
			mtd->writesize, MAX_CHUNK_SIZE - mtd->oobsize);
		return -ENOTSUPP;
	}

	to_marvell_nand(chip)->layout = NULL;
	for (i = 0; i < ARRAY_SIZE(marvell_nfc_layouts); i++) {
		l = &marvell_nfc_layouts[i];
		if (mtd->writesize == l->writesize &&
		    ecc->size == l->chunk && ecc->strength == l->strength) {
			to_marvell_nand(chip)->layout = l;
			break;
		}
	}

	if (!to_marvell_nand(chip)->layout ||
	    (!nfc->caps->is_nfcv2 && ecc->strength > 1)) {
		dev_err(nfc->dev,
			"ECC strength %d at page size %d is not supported\n",
			ecc->strength, mtd->writesize);
		return -ENOTSUPP;
	}

	mtd_set_ooblayout(mtd, &marvell_nand_ooblayout_ops);
	ecc->steps = l->nchunks;
	ecc->size = l->data_bytes;

	if (ecc->strength == 1) {
		chip->ecc.algo = NAND_ECC_HAMMING;
		ecc->read_page_raw = marvell_nfc_hw_ecc_hmg_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_hmg_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_hmg_read_oob_raw;
		ecc->read_oob = ecc->read_oob_raw;
		ecc->write_page_raw = marvell_nfc_hw_ecc_hmg_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_hmg_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_hmg_write_oob_raw;
		ecc->write_oob = ecc->write_oob_raw;
	} else {
		chip->ecc.algo = NAND_ECC_BCH;
		ecc->strength = 16;
		ecc->read_page_raw = marvell_nfc_hw_ecc_bch_read_page_raw;
		ecc->read_page = marvell_nfc_hw_ecc_bch_read_page;
		ecc->read_oob_raw = marvell_nfc_hw_ecc_bch_read_oob_raw;
		ecc->read_oob = marvell_nfc_hw_ecc_bch_read_oob;
		ecc->write_page_raw = marvell_nfc_hw_ecc_bch_write_page_raw;
		ecc->write_page = marvell_nfc_hw_ecc_bch_write_page;
		ecc->write_oob_raw = marvell_nfc_hw_ecc_bch_write_oob_raw;
		ecc->write_oob = marvell_nfc_hw_ecc_bch_write_oob;
	}

	return 0;
}
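
/*
 * Resolve the requested ECC configuration: fall back to the chip's
 * advertised minimum requirements (or 1b/512B when none are known), then
 * validate the chosen mode against the controller capabilities.
 */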
static int marvell_nand_ecc_init(struct mtd_info *mtd,
				 struct nand_ecc_ctrl *ecc)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	int ret;

	if (ecc->mode != NAND_ECC_NONE && (!ecc->size || !ecc->strength)) {
		if (chip->ecc_step_ds && chip->ecc_strength_ds) {
			ecc->size = chip->ecc_step_ds;
			ecc->strength = chip->ecc_strength_ds;
		} else {
			dev_info(nfc->dev,
				 "No minimum ECC strength, using 1b/512B\n");
			ecc->size = 512;
			ecc->strength = 1;
		}
	}

	switch (ecc->mode) {
	case NAND_ECC_HW:
		ret = marvell_nand_hw_ecc_ctrl_init(mtd, ecc);
		if (ret)
			return ret;
		break;
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
	case NAND_ECC_ON_DIE:
		if (!nfc->caps->is_nfcv2 && mtd->writesize != SZ_512 &&
		    mtd->writesize != SZ_2K) {
			dev_err(nfc->dev, "NFCv1 cannot write %d bytes pages\n",
				mtd->writesize);
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
		   NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,	/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
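
/*
 * Convert the generic SDR timings requested by the core into controller
 * clock cycles and cache the resulting NDTR0/NDTR1 values in the per-chip
 * structure; they are applied when the chip is selected.
 */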
static int marvell_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
					    const struct nand_data_interface
					    *conf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	unsigned int period_ns = 1000000000 / clk_get_rate(nfc->core_clk) * 2;
	const struct nand_sdr_timings *sdr;
	struct marvell_nfc_timings nfc_tmg;
	int read_delay;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/*
	 * SDR timings are given in pico-seconds while NFC timings must be
	 * expressed in NAND controller clock cycles, which is half of the
	 * frequency of the accessible ECC clock retrieved by clk_get_rate().
	 * This is not written anywhere in the datasheet but was observed
	 * with an oscilloscope.
	 *
	 * NFC datasheet gives equations from which these calculations
	 * are derived. They tend to be slightly more restrictive than the
	 * given core timings and may improve the overall speed.
	 */
	nfc_tmg.tRP = TO_CYCLES(DIV_ROUND_UP(sdr->tRC_min, 2), period_ns) - 1;
	nfc_tmg.tRH = nfc_tmg.tRP;
	nfc_tmg.tWP = TO_CYCLES(DIV_ROUND_UP(sdr->tWC_min, 2), period_ns) - 1;
	nfc_tmg.tWH = nfc_tmg.tWP;
	nfc_tmg.tCS = TO_CYCLES(sdr->tCS_min, period_ns);
	nfc_tmg.tCH = TO_CYCLES(sdr->tCH_min, period_ns) - 1;
	nfc_tmg.tADL = TO_CYCLES(sdr->tADL_min, period_ns);
	/*
	 * Read delay is the time of propagation from SoC pins to NFC internal
	 * logic. With non-EDO timings, this is MIN_RD_DEL_CNT clock cycles. In
	 * EDO mode, an additional delay of tRH must be taken into account so
	 * the data is sampled on the falling edge instead of the rising edge.
	 */
	read_delay = sdr->tRC_min >= 30000 ?
		     MIN_RD_DEL_CNT : MIN_RD_DEL_CNT + nfc_tmg.tRH;

	nfc_tmg.tAR = TO_CYCLES(sdr->tAR_min, period_ns);
	/*
	 * tWHR and tRHW are supposed to be read to write delays (and vice
	 * versa) but in some cases, ie. when doing a change column, they must
	 * be greater than that to be sure tCCS delay is respected.
	 */
	nfc_tmg.tWHR = TO_CYCLES(max_t(int, sdr->tWHR_min, sdr->tCCS_min),
				 period_ns) - 2;
	nfc_tmg.tRHW = TO_CYCLES(max_t(int, sdr->tRHW_min, sdr->tCCS_min),
				 period_ns);

	/*
	 * NFCv2: Use WAIT_MODE (wait for RB line), do not rely only on delays.
	 * NFCv1: No WAIT_MODE, tR must be maximal.
	 */
	if (nfc->caps->is_nfcv2) {
		nfc_tmg.tR = TO_CYCLES(sdr->tWB_max, period_ns);
	} else {
		nfc_tmg.tR = TO_CYCLES64(sdr->tWB_max + sdr->tR_max,
					 period_ns);
		if (nfc_tmg.tR + 3 > nfc_tmg.tCH)
			nfc_tmg.tR = nfc_tmg.tCH - 3;
		else
			nfc_tmg.tR = 0;
	}

	if (chipnr < 0)
		return 0;

	marvell_nand->ndtr0 =
		NDTR0_TRP(nfc_tmg.tRP) |
		NDTR0_TRH(nfc_tmg.tRH) |
		NDTR0_ETRP(nfc_tmg.tRP) |
		NDTR0_TWP(nfc_tmg.tWP) |
		NDTR0_TWH(nfc_tmg.tWH) |
		NDTR0_TCS(nfc_tmg.tCS) |
		NDTR0_TCH(nfc_tmg.tCH);

	marvell_nand->ndtr1 =
		NDTR1_TAR(nfc_tmg.tAR) |
		NDTR1_TWHR(nfc_tmg.tWHR) |
		NDTR1_TR(nfc_tmg.tR);

	if (nfc->caps->is_nfcv2) {
		marvell_nand->ndtr0 |=
			NDTR0_RD_CNT_DEL(read_delay) |
			NDTR0_SELCNTR |
			NDTR0_TADL(nfc_tmg.tADL);

		marvell_nand->ndtr1 |=
			NDTR1_TRHW(nfc_tmg.tRHW) |
			NDTR1_WAIT_MODE;
	}

	return 0;
}
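
/*
 * Called once the chip has been identified: set up the in-flash bad block
 * table if requested, derive NDCR and address cycle settings from the
 * detected geometry, initialize ECC and pick the MTD device name.
 */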
static int marvell_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
	struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(nfc->dev);
	int ret;

	if (pdata && pdata->flash_bbt)
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/* Save the chip-specific fields of NDCR */
	marvell_nand->ndcr = NDCR_PAGE_SZ(mtd->writesize);
	if (chip->options & NAND_BUSWIDTH_16)
		marvell_nand->ndcr |= NDCR_DWIDTH_M | NDCR_DWIDTH_C;

	/*
	 * On small page NANDs, only one cycle is needed to pass the
	 * column address.
	 */
	if (mtd->writesize <= 512) {
		marvell_nand->addr_cyc = 1;
	} else {
		marvell_nand->addr_cyc = 2;
		marvell_nand->ndcr |= NDCR_RA_START;
	}

	/*
	 * Now add the number of cycles needed to pass the row
	 * address.
	 *
	 * Addressing a chip using CS 2 or 3 should also need the third row
	 * cycle but due to inconsistency in the documentation and lack of
	 * hardware to test this situation, this case is not supported.
	 */
	if (chip->options & NAND_ROW_ADDR_3)
		marvell_nand->addr_cyc += 3;
	else
		marvell_nand->addr_cyc += 2;

	if (pdata) {
		chip->ecc.size = pdata->ecc_step_size;
		chip->ecc.strength = pdata->ecc_strength;
	}

	ret = marvell_nand_ecc_init(mtd, &chip->ecc);
	if (ret) {
		dev_err(nfc->dev, "ECC init failed: %d\n", ret);
		return ret;
	}

	if (chip->ecc.mode == NAND_ECC_HW) {
		/*
		 * Subpage write not available with hardware ECC, prohibit also
		 * subpage read as in userspace subpage access would still be
		 * allowed and subpage write, if used, would lead to numerous
		 * uncorrectable ECC errors.
		 */
		chip->options |= NAND_NO_SUBPAGE_WRITE;
	}

	if (pdata || nfc->caps->legacy_of_bindings) {
		/*
		 * We keep the MTD name unchanged to avoid breaking platforms
		 * where the MTD cmdline parser is used and the bootloader
		 * has not been updated to use the new naming scheme.
		 */
		mtd->name = "pxa3xx_nand-0";
	} else if (!mtd->name) {
		/*
		 * If the new bindings are used and the bootloader has not been
		 * updated to pass a new mtdparts parameter on the cmdline, you
		 * should define the following property in your NAND node, ie:
		 *
		 *	label = "main-storage";
		 *
		 * This way, mtd->name will be set by the core when
		 * nand_set_flash_node() is called.
		 */
		mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL,
					   "%s:nand.%d", dev_name(nfc->dev),
					   marvell_nand->sels[0].cs);
		if (!mtd->name) {
			dev_err(nfc->dev, "Failed to allocate mtd->name\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static const struct nand_controller_ops marvell_nand_controller_ops = {
	.attach_chip = marvell_nand_attach_chip,
};
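
/*
 * Instantiate one NAND chip: parse its CS/RB lines (from platform data,
 * legacy bindings or a per-chip subnode), wire up the controller hooks,
 * scan the chip and register the resulting MTD device.
 */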
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
				  struct device_node *np)
{
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(dev);
	struct marvell_nand_chip *marvell_nand;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs, rb;

	/*
	 * The legacy "num-cs" property indicates the number of CS on the only
	 * chip connected to the controller (legacy bindings do not support
	 * more than one chip). The CS and RB pins are always the #0.
	 *
	 * When not using legacy bindings, a couple of "reg" and "nand-rb"
	 * properties must be filled. For each chip, expressed as a subnode,
	 * "reg" points to the CS lines and "nand-rb" to the RB line.
	 */
	if (pdata || nfc->caps->legacy_of_bindings) {
		nsels = 1;
	} else {
		nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
		if (nsels <= 0) {
			dev_err(dev, "missing/invalid reg property\n");
			return -EINVAL;
		}
	}

	/* Alloc the nand chip structure */
	marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) +
				    (nsels *
				     sizeof(struct marvell_nand_chip_sel)),
				    GFP_KERNEL);
	if (!marvell_nand) {
		dev_err(dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	marvell_nand->nsels = nsels;
	marvell_nand->selected_die = -1;

	for (i = 0; i < nsels; i++) {
		if (pdata || nfc->caps->legacy_of_bindings) {
			/*
			 * Legacy bindings use the CS lines in natural
			 * order (0, 1, ...)
			 */
			cs = i;
		} else {
			/* Retrieve CS id */
			ret = of_property_read_u32_index(np, "reg", i, &cs);
			if (ret) {
				dev_err(dev, "could not retrieve reg property: %d\n",
					ret);
				return ret;
			}
		}

		if (cs >= nfc->caps->max_cs_nb) {
			dev_err(dev, "invalid reg value: %u (max CS = %d)\n",
				cs, nfc->caps->max_cs_nb);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &nfc->assigned_cs)) {
			dev_err(dev, "CS %d already assigned\n", cs);
			return -EINVAL;
		}

		/*
		 * The cs variable represents the chip select id, which must be
		 * converted in bit fields for NDCB0 and NDCB2 to select the
		 * right chip. Unfortunately, due to a lack of information on
		 * the subject and incoherent documentation, the user should not
		 * use CS1 and CS3 at all as asserting them is not supported in
		 * a reliable way (due to multiplexing inside ADDR5 field).
		 */
		marvell_nand->sels[i].cs = cs;
		switch (cs) {
		case 0:
		case 2:
			marvell_nand->sels[i].ndcb0_csel = 0;
			break;
		case 1:
		case 3:
			marvell_nand->sels[i].ndcb0_csel = NDCB0_CSEL;
			break;
		default:
			return -EINVAL;
		}

		/* Retrieve RB id */
		if (pdata || nfc->caps->legacy_of_bindings) {
			/* Legacy bindings always use RB #0 */
			rb = 0;
		} else {
			ret = of_property_read_u32_index(np, "nand-rb", i,
							 &rb);
			if (ret) {
				dev_err(dev,
					"could not retrieve RB property: %d\n",
					ret);
				return ret;
			}
		}

		if (rb >= nfc->caps->max_rb_nb) {
			dev_err(dev, "invalid reg value: %u (max RB = %d)\n",
				rb, nfc->caps->max_rb_nb);
			return -EINVAL;
		}

		marvell_nand->sels[i].rb = rb;
	}

	chip = &marvell_nand->chip;
	chip->controller = &nfc->controller;
	nand_set_flash_node(chip, np);

	chip->exec_op = marvell_nfc_exec_op;
	chip->select_chip = marvell_nfc_select_chip;
	if (!of_property_read_bool(np, "marvell,nand-keep-config"))
		chip->setup_data_interface = marvell_nfc_setup_data_interface;

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
	 * in the DT node, this entry will be overwritten in nand_scan_ident().
	 */
	chip->ecc.mode = NAND_ECC_HW;

	/*
	 * Save a reference value for timing registers before
	 * ->setup_data_interface() is called.
	 */
	marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0);
	marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1);

	chip->options |= NAND_BUSWIDTH_AUTO;

	ret = nand_scan(chip, marvell_nand->nsels);
	if (ret) {
		dev_err(dev, "could not scan the nand chip\n");
		return ret;
	}

	if (pdata)
		/* Legacy bindings support only one chip */
		ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
	else
		ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(dev, "failed to register mtd device: %d\n", ret);
		nand_release(mtd);
		return ret;
	}

	list_add_tail(&marvell_nand->node, &nfc->chips);

	return 0;
}
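
/*
 * For reference, a minimal chip subnode using the new bindings could look
 * like the following (values are illustrative, not board-specific):
 *
 *	nand@0 {
 *		reg = <0>;
 *		nand-rb = <0>;
 *		label = "main-storage";
 *	};
 */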
static int marvell_nand_chips_init(struct device *dev, struct marvell_nfc *nfc)
{
	struct device_node *np = dev->of_node;
	struct device_node *nand_np;
	int max_cs = nfc->caps->max_cs_nb;
	int nchips;
	int ret;

	if (!np)
		nchips = 1;
	else
		nchips = of_get_child_count(np);

	if (nchips > max_cs) {
		dev_err(dev, "too many NAND chips: %d (max = %d CS)\n", nchips,
			max_cs);
		return -EINVAL;
	}

	/*
	 * Legacy bindings do not use child nodes to exhibit NAND chip
	 * properties and layout. Instead, NAND properties are mixed with the
	 * controller ones, and partitions are defined as direct subnodes of the
	 * NAND controller node.
	 */
	if (nfc->caps->legacy_of_bindings) {
		ret = marvell_nand_chip_init(dev, nfc, np);
		return ret;
	}

	for_each_child_of_node(np, nand_np) {
		ret = marvell_nand_chip_init(dev, nfc, nand_np);
		if (ret) {
			of_node_put(nand_np);
			return ret;
		}
	}

	return 0;
}

static void marvell_nand_chips_cleanup(struct marvell_nfc *nfc)
{
	struct marvell_nand_chip *entry, *temp;

	list_for_each_entry_safe(entry, temp, &nfc->chips, node) {
		nand_release(nand_to_mtd(&entry->chip));
		list_del(&entry->node);
	}
}
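
/*
 * NFCv1 only: request the PXA DMA channel used to stream data through the
 * NDDB register. A bounce buffer is allocated because transfer lengths must
 * be multiples of 32 bytes and may exceed the caller's buffer.
 */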
static int marvell_nfc_init_dma(struct marvell_nfc *nfc)
{
	struct platform_device *pdev = container_of(nfc->dev,
						    struct platform_device,
						    dev);
	struct dma_slave_config config = {};
	struct resource *r;
	int ret;

	if (!IS_ENABLED(CONFIG_PXA_DMA)) {
		dev_warn(nfc->dev,
			 "DMA not enabled in configuration\n");
		return -ENOTSUPP;
	}

	ret = dma_set_mask_and_coherent(nfc->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	nfc->dma_chan = dma_request_slave_channel(nfc->dev, "data");
	if (!nfc->dma_chan) {
		dev_dbg(nfc->dev,
			"Unable to request data DMA channel\n");
		return -ENODEV;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENXIO;

	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = r->start + NDDB;
	config.dst_addr = r->start + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(nfc->dma_chan, &config);
	if (ret < 0) {
		dev_err(nfc->dev, "Failed to configure DMA channel\n");
		return ret;
	}

	/*
	 * DMA must act on length multiple of 32 and this length may be
	 * bigger than the destination buffer. Use this buffer instead
	 * for DMA transfers and then copy the desired amount of data to
	 * the provided buffer.
	 */
	nfc->dma_buf = kmalloc(MAX_CHUNK_SIZE, GFP_KERNEL | GFP_DMA);
	if (!nfc->dma_buf)
		return -ENOMEM;

	nfc->use_dma = true;

	return 0;
}

static void marvell_nfc_reset(struct marvell_nfc *nfc)
{
	/*
	 * ECC operations and interruptions are only enabled when specifically
	 * needed. ECC shall not be activated in the early stages (fails probe).
	 * Arbiter flag, even if marked as "reserved", must be set (empirical).
	 * SPARE_EN bit must always be set or ECC bytes will not be at the same
	 * offset in the read page and this will fail the protection.
	 */
	writel_relaxed(NDCR_ALL_INT | NDCR_ND_ARB_EN | NDCR_SPARE_EN |
		       NDCR_RD_ID_CNT(NFCV1_READID_LEN), nfc->regs + NDCR);
	writel_relaxed(0xFFFFFFFF, nfc->regs + NDSR);
	writel_relaxed(0, nfc->regs + NDECCCTRL);
}

static int marvell_nfc_init(struct marvell_nfc *nfc)
{
	struct device_node *np = nfc->dev->of_node;

	/*
	 * Some SoCs like A7k/A8k need to manually enable the NAND
	 * controller, gated clocks and reset bits to avoid being bootloader
	 * dependent. This is done through the use of the System Functions
	 * registers.
	 */
	if (nfc->caps->need_system_controller) {
		struct regmap *sysctrl_base =
			syscon_regmap_lookup_by_phandle(np,
							"marvell,system-controller");

		if (IS_ERR(sysctrl_base))
			return PTR_ERR(sysctrl_base);

		regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
			     GENCONF_SOC_DEVICE_MUX_NFC_EN |
			     GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST |
			     GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST |
			     GENCONF_SOC_DEVICE_MUX_NFC_INT_EN);

		regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
				   GENCONF_CLK_GATING_CTRL_ND_GATE,
				   GENCONF_CLK_GATING_CTRL_ND_GATE);

		regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
				   GENCONF_ND_CLK_CTRL_EN,
				   GENCONF_ND_CLK_CTRL_EN);
	}

	/* Configure the DMA if appropriate */
	if (!nfc->caps->is_nfcv2)
		marvell_nfc_init_dma(nfc);

	marvell_nfc_reset(nfc);

	return 0;
}
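
/*
 * Probe: map the controller registers, grab clocks and interrupt, resolve
 * the per-SoC capabilities, initialize the controller and finally scan for
 * the attached NAND chips.
 */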
static int marvell_nfc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	struct marvell_nfc *nfc;
	int ret;
	int irq;

	nfc = devm_kzalloc(&pdev->dev, sizeof(struct marvell_nfc),
			   GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	nfc->dev = dev;
	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &marvell_nand_controller_ops;
	INIT_LIST_HEAD(&nfc->chips);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nfc->regs = devm_ioremap_resource(dev, r);
	if (IS_ERR(nfc->regs))
		return PTR_ERR(nfc->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to retrieve irq\n");
		return irq;
	}

	nfc->core_clk = devm_clk_get(&pdev->dev, "core");

	/* Manage the legacy case (when the first clock was not named) */
	if (nfc->core_clk == ERR_PTR(-ENOENT))
		nfc->core_clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(nfc->core_clk))
		return PTR_ERR(nfc->core_clk);

	ret = clk_prepare_enable(nfc->core_clk);
	if (ret)
		return ret;

	nfc->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(nfc->reg_clk)) {
		if (PTR_ERR(nfc->reg_clk) != -ENOENT) {
			ret = PTR_ERR(nfc->reg_clk);
			goto unprepare_core_clk;
		}

		nfc->reg_clk = NULL;
	}

	ret = clk_prepare_enable(nfc->reg_clk);
	if (ret)
		goto unprepare_core_clk;

	marvell_nfc_disable_int(nfc, NDCR_ALL_INT);
	marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
	ret = devm_request_irq(dev, irq, marvell_nfc_isr,
			       0, "marvell-nfc", nfc);
	if (ret)
		goto unprepare_reg_clk;

	/* Get NAND controller capabilities */
	if (pdev->id_entry)
		nfc->caps = (void *)pdev->id_entry->driver_data;
	else
		nfc->caps = of_device_get_match_data(&pdev->dev);

	if (!nfc->caps) {
		dev_err(dev, "Could not retrieve NFC caps\n");
		ret = -EINVAL;
		goto unprepare_reg_clk;
	}

	/* Init the controller and then probe the chips */
	ret = marvell_nfc_init(nfc);
	if (ret)
		goto unprepare_reg_clk;

	platform_set_drvdata(pdev, nfc);

	ret = marvell_nand_chips_init(dev, nfc);
	if (ret)
		goto unprepare_reg_clk;

	return 0;

unprepare_reg_clk:
	clk_disable_unprepare(nfc->reg_clk);
unprepare_core_clk:
	clk_disable_unprepare(nfc->core_clk);

	return ret;
}

static int marvell_nfc_remove(struct platform_device *pdev)
{
	struct marvell_nfc *nfc = platform_get_drvdata(pdev);

	marvell_nand_chips_cleanup(nfc);

	if (nfc->use_dma) {
		dmaengine_terminate_all(nfc->dma_chan);
		dma_release_channel(nfc->dma_chan);
	}

	clk_disable_unprepare(nfc->reg_clk);
	clk_disable_unprepare(nfc->core_clk);

	return 0;
}

static int __maybe_unused marvell_nfc_suspend(struct device *dev)
{
	struct marvell_nfc *nfc = dev_get_drvdata(dev);
	struct marvell_nand_chip *chip;

	list_for_each_entry(chip, &nfc->chips, node)
		marvell_nfc_wait_ndrun(&chip->chip);

	clk_disable_unprepare(nfc->reg_clk);
	clk_disable_unprepare(nfc->core_clk);

	return 0;
}

static int __maybe_unused marvell_nfc_resume(struct device *dev)
{
	struct marvell_nfc *nfc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(nfc->core_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(nfc->reg_clk);
	if (ret < 0)
		return ret;

	/*
	 * Reset nfc->selected_chip so the next command will cause the timing
	 * registers to be restored in marvell_nfc_select_chip().
	 */
	nfc->selected_chip = NULL;

	/* Reset registers that have lost their contents */
	marvell_nfc_reset(nfc);

	return 0;
}

static const struct dev_pm_ops marvell_nfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(marvell_nfc_suspend, marvell_nfc_resume)
};

static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.need_system_controller = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_pxa3xx_nfc_caps = {
	.max_cs_nb = 2,
	.max_rb_nb = 1,
};

static const struct marvell_nfc_caps marvell_armada_8k_nfc_legacy_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.need_system_controller = true,
	.legacy_of_bindings = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_armada370_nfc_legacy_caps = {
	.max_cs_nb = 4,
	.max_rb_nb = 2,
	.legacy_of_bindings = true,
	.is_nfcv2 = true,
};

static const struct marvell_nfc_caps marvell_pxa3xx_nfc_legacy_caps = {
	.max_cs_nb = 2,
	.max_rb_nb = 1,
	.legacy_of_bindings = true,
};

static const struct platform_device_id marvell_nfc_platform_ids[] = {
	{
		.name = "pxa3xx-nand",
		.driver_data = (kernel_ulong_t)&marvell_pxa3xx_nfc_legacy_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, marvell_nfc_platform_ids);

static const struct of_device_id marvell_nfc_of_ids[] = {
	{
		.compatible = "marvell,armada-8k-nand-controller",
		.data = &marvell_armada_8k_nfc_caps,
	},
	{
		.compatible = "marvell,armada370-nand-controller",
		.data = &marvell_armada370_nfc_caps,
	},
	{
		.compatible = "marvell,pxa3xx-nand-controller",
		.data = &marvell_pxa3xx_nfc_caps,
	},
	/* Support for old/deprecated bindings: */
	{
		.compatible = "marvell,armada-8k-nand",
		.data = &marvell_armada_8k_nfc_legacy_caps,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data = &marvell_armada370_nfc_legacy_caps,
	},
	{
		.compatible = "marvell,pxa3xx-nand",
		.data = &marvell_pxa3xx_nfc_legacy_caps,
	},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, marvell_nfc_of_ids);

static struct platform_driver marvell_nfc_driver = {
	.driver	= {
		.name		= "marvell-nfc",
		.of_match_table = marvell_nfc_of_ids,
		.pm		= &marvell_nfc_pm_ops,
	},
	.id_table = marvell_nfc_platform_ids,
	.probe = marvell_nfc_probe,
	.remove	= marvell_nfc_remove,
};
module_platform_driver(marvell_nfc_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Marvell NAND controller driver");