mtd: pxa3xx_nand: add a default chunk size
[linux-2.6-block.git] / drivers / mtd / nand / pxa3xx_nand.c
CommitLineData
fe69af00 1/*
2 * drivers/mtd/nand/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
de484a38
EG
10 *
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
fe69af00 12 */
13
a88bdbb5 14#include <linux/kernel.h>
fe69af00 15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/dma-mapping.h>
19#include <linux/delay.h>
20#include <linux/clk.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
a1c06ee1 24#include <linux/io.h>
afca11ec 25#include <linux/iopoll.h>
a1c06ee1 26#include <linux/irq.h>
5a0e3ad6 27#include <linux/slab.h>
1e7ba630
DM
28#include <linux/of.h>
29#include <linux/of_device.h>
776f265e 30#include <linux/of_mtd.h>
fe69af00 31
ce914e6b 32#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
f4db2e3a
EG
33#define ARCH_HAS_DMA
34#endif
35
36#ifdef ARCH_HAS_DMA
afb5b5c9 37#include <mach/dma.h>
f4db2e3a
EG
38#endif
39
293b2da1 40#include <linux/platform_data/mtd-nand-pxa3xx.h>
fe69af00 41
e5860c18
NMG
42#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
43#define NAND_STOP_DELAY msecs_to_jiffies(40)
4eb2da89 44#define PAGE_CHUNK_SIZE (2048)
fe69af00 45
62e8b851
EG
46/*
47 * Define a buffer size for the initial command that detects the flash device:
c1634097
EG
48 * STATUS, READID and PARAM.
49 * ONFI param page is 256 bytes, and there are three redundant copies
50 * to be read. JEDEC param page is 512 bytes, and there are also three
51 * redundant copies to be read.
52 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
62e8b851 53 */
c1634097 54#define INIT_BUFFER_SIZE 2048
62e8b851 55
fe69af00 56/* registers and bit definitions */
57#define NDCR (0x00) /* Control register */
58#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
59#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
60#define NDSR (0x14) /* Status Register */
61#define NDPCR (0x18) /* Page Count Register */
62#define NDBDR0 (0x1C) /* Bad Block Register 0 */
63#define NDBDR1 (0x20) /* Bad Block Register 1 */
43bcfd2b 64#define NDECCCTRL (0x28) /* ECC control */
fe69af00 65#define NDDB (0x40) /* Data Buffer */
66#define NDCB0 (0x48) /* Command Buffer0 */
67#define NDCB1 (0x4C) /* Command Buffer1 */
68#define NDCB2 (0x50) /* Command Buffer2 */
69
70#define NDCR_SPARE_EN (0x1 << 31)
71#define NDCR_ECC_EN (0x1 << 30)
72#define NDCR_DMA_EN (0x1 << 29)
73#define NDCR_ND_RUN (0x1 << 28)
74#define NDCR_DWIDTH_C (0x1 << 27)
75#define NDCR_DWIDTH_M (0x1 << 26)
76#define NDCR_PAGE_SZ (0x1 << 24)
77#define NDCR_NCSX (0x1 << 23)
78#define NDCR_ND_MODE (0x3 << 21)
79#define NDCR_NAND_MODE (0x0)
80#define NDCR_CLR_PG_CNT (0x1 << 20)
f8155a40 81#define NDCR_STOP_ON_UNCOR (0x1 << 19)
fe69af00 82#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
83#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
84
85#define NDCR_RA_START (0x1 << 15)
86#define NDCR_PG_PER_BLK (0x1 << 14)
87#define NDCR_ND_ARB_EN (0x1 << 12)
f8155a40 88#define NDCR_INT_MASK (0xFFF)
fe69af00 89
90#define NDSR_MASK (0xfff)
87f5336e
EG
91#define NDSR_ERR_CNT_OFF (16)
92#define NDSR_ERR_CNT_MASK (0x1f)
93#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
f8155a40
LW
94#define NDSR_RDY (0x1 << 12)
95#define NDSR_FLASH_RDY (0x1 << 11)
fe69af00 96#define NDSR_CS0_PAGED (0x1 << 10)
97#define NDSR_CS1_PAGED (0x1 << 9)
98#define NDSR_CS0_CMDD (0x1 << 8)
99#define NDSR_CS1_CMDD (0x1 << 7)
100#define NDSR_CS0_BBD (0x1 << 6)
101#define NDSR_CS1_BBD (0x1 << 5)
87f5336e
EG
102#define NDSR_UNCORERR (0x1 << 4)
103#define NDSR_CORERR (0x1 << 3)
fe69af00 104#define NDSR_WRDREQ (0x1 << 2)
105#define NDSR_RDDREQ (0x1 << 1)
106#define NDSR_WRCMDREQ (0x1)
107
41a63430 108#define NDCB0_LEN_OVRD (0x1 << 28)
4eb2da89 109#define NDCB0_ST_ROW_EN (0x1 << 26)
fe69af00 110#define NDCB0_AUTO_RS (0x1 << 25)
111#define NDCB0_CSEL (0x1 << 24)
70ed8523
EG
112#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
113#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
fe69af00 114#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
115#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
116#define NDCB0_NC (0x1 << 20)
117#define NDCB0_DBC (0x1 << 19)
118#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
119#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
120#define NDCB0_CMD2_MASK (0xff << 8)
121#define NDCB0_CMD1_MASK (0xff)
122#define NDCB0_ADDR_CYC_SHIFT (16)
123
70ed8523
EG
124#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
125#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
126#define EXT_CMD_TYPE_READ 4 /* Read */
127#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
128#define EXT_CMD_TYPE_FINAL 3 /* Final command */
129#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
130#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
131
fe69af00 132/* macros for registers read/write */
133#define nand_writel(info, off, val) \
b7e46062 134 writel_relaxed((val), (info)->mmio_base + (off))
fe69af00 135
136#define nand_readl(info, off) \
b7e46062 137 readl_relaxed((info)->mmio_base + (off))
fe69af00 138
139/* error code and state */
140enum {
141 ERR_NONE = 0,
142 ERR_DMABUSERR = -1,
143 ERR_SENDCMD = -2,
87f5336e 144 ERR_UNCORERR = -3,
fe69af00 145 ERR_BBERR = -4,
87f5336e 146 ERR_CORERR = -5,
fe69af00 147};
148
149enum {
f8155a40 150 STATE_IDLE = 0,
d456882b 151 STATE_PREPARED,
fe69af00 152 STATE_CMD_HANDLE,
153 STATE_DMA_READING,
154 STATE_DMA_WRITING,
155 STATE_DMA_DONE,
156 STATE_PIO_READING,
157 STATE_PIO_WRITING,
f8155a40
LW
158 STATE_CMD_DONE,
159 STATE_READY,
fe69af00 160};
161
c0f3b864
EG
162enum pxa3xx_nand_variant {
163 PXA3XX_NAND_VARIANT_PXA,
164 PXA3XX_NAND_VARIANT_ARMADA370,
165};
166
d456882b
LW
/*
 * Per-chip-select state: one instance per NAND chip attached to the
 * controller (up to NUM_CHIP_SELECT chips).
 */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	/* back-pointer to the shared controller state (struct pxa3xx_nand_info) */
	void			*info_data;

	/* presumably a per-chip "use HW ECC" flag — TODO confirm against probe code */
	int			use_ecc;
	/* chip select this chip is wired to (selects NDSR/NDCB bits in the IRQ path) */
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;	/* column address cycles per command */
	unsigned int		row_addr_cycles;	/* row address cycles per command */
	size_t			read_id_bytes;		/* bytes expected from READID */

};
182
/* Controller-wide driver state, shared by all chip selects. */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	*pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* mapped controller registers */
	unsigned long		mmio_phys;	/* physical base, used as DMA target for NDDB */
	/* signalled from the IRQ handler: command-done and device-ready events */
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;	/* read offset into data_buff for the caller */
	unsigned int		buf_count;	/* total valid bytes in data_buff */
	unsigned int		buf_size;	/* allocated size of data_buff */
	unsigned int		data_buff_pos;	/* PIO cursor into data_buff (multi-chunk xfers) */
	unsigned int		oob_buff_pos;	/* PIO cursor into oob_buff */

	/* DMA information */
	int			drcmr_dat;	/* DMA request line for data */
	int			drcmr_cmd;	/* DMA request line for commands */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* machine, driven by the IRQ handler */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;	/* a command is in flight; caller must wait */

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;	/* OOB bytes transferred per chunk */
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips reported for the current chunk */
	unsigned int		max_bitflips;	/* max per-chunk bitflips for this page */
	int			retcode;	/* ERR_* result of the last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
245
90ab5ee9 246static bool use_dma = 1;
fe69af00 247module_param(use_dma, bool, 0444);
25985edc 248MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
fe69af00 249
c1f82478 250static struct pxa3xx_nand_timing timing[] = {
227a886c
LW
251 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
252 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
253 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
254 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
d3490dfd
HZ
255};
256
c1f82478 257static struct pxa3xx_nand_flash builtin_flash_types[] = {
4332c116
LW
258{ "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
259{ "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
260{ "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
261{ "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
262{ "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
263{ "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
264{ "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
265{ "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
266{ "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
d3490dfd
HZ
267};
268
776f265e
EG
269static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
270static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
271
272static struct nand_bbt_descr bbt_main_descr = {
273 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
274 | NAND_BBT_2BIT | NAND_BBT_VERSION,
275 .offs = 8,
276 .len = 6,
277 .veroffs = 14,
278 .maxblocks = 8, /* Last 8 blocks in each chip */
279 .pattern = bbt_pattern
280};
281
282static struct nand_bbt_descr bbt_mirror_descr = {
283 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
284 | NAND_BBT_2BIT | NAND_BBT_VERSION,
285 .offs = 8,
286 .len = 6,
287 .veroffs = 14,
288 .maxblocks = 8, /* Last 8 blocks in each chip */
289 .pattern = bbt_mirror_pattern
290};
291
3db227b6
RG
292static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
293 .eccbytes = 32,
294 .eccpos = {
295 32, 33, 34, 35, 36, 37, 38, 39,
296 40, 41, 42, 43, 44, 45, 46, 47,
297 48, 49, 50, 51, 52, 53, 54, 55,
298 56, 57, 58, 59, 60, 61, 62, 63},
299 .oobfree = { {2, 30} }
300};
301
70ed8523
EG
302static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
303 .eccbytes = 64,
304 .eccpos = {
305 32, 33, 34, 35, 36, 37, 38, 39,
306 40, 41, 42, 43, 44, 45, 46, 47,
307 48, 49, 50, 51, 52, 53, 54, 55,
308 56, 57, 58, 59, 60, 61, 62, 63,
309 96, 97, 98, 99, 100, 101, 102, 103,
310 104, 105, 106, 107, 108, 109, 110, 111,
311 112, 113, 114, 115, 116, 117, 118, 119,
312 120, 121, 122, 123, 124, 125, 126, 127},
313 /* Bootrom looks in bytes 0 & 5 for bad blocks */
314 .oobfree = { {6, 26}, { 64, 32} }
315};
316
317static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
318 .eccbytes = 128,
319 .eccpos = {
320 32, 33, 34, 35, 36, 37, 38, 39,
321 40, 41, 42, 43, 44, 45, 46, 47,
322 48, 49, 50, 51, 52, 53, 54, 55,
323 56, 57, 58, 59, 60, 61, 62, 63},
324 .oobfree = { }
325};
326
227a886c
LW
327/* Define a default flash type setting serve as flash detecting only */
328#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
329
fe69af00 330#define NDTR0_tCH(c) (min((c), 7) << 19)
331#define NDTR0_tCS(c) (min((c), 7) << 16)
332#define NDTR0_tWH(c) (min((c), 7) << 11)
333#define NDTR0_tWP(c) (min((c), 7) << 8)
334#define NDTR0_tRH(c) (min((c), 7) << 3)
335#define NDTR0_tRP(c) (min((c), 7) << 0)
336
337#define NDTR1_tR(c) (min((c), 65535) << 16)
338#define NDTR1_tWHR(c) (min((c), 15) << 4)
339#define NDTR1_tAR(c) (min((c), 15) << 0)
340
341/* convert nano-seconds to nand flash controller clock cycles */
93b352fc 342#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
fe69af00 343
17754ad6 344static const struct of_device_id pxa3xx_nand_dt_ids[] = {
c7e9c7e7
EG
345 {
346 .compatible = "marvell,pxa3xx-nand",
347 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
348 },
1963ff97
EG
349 {
350 .compatible = "marvell,armada370-nand",
351 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
352 },
c7e9c7e7
EG
353 {}
354};
355MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
356
357static enum pxa3xx_nand_variant
358pxa3xx_nand_get_variant(struct platform_device *pdev)
359{
360 const struct of_device_id *of_id =
361 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
362 if (!of_id)
363 return PXA3XX_NAND_VARIANT_PXA;
364 return (enum pxa3xx_nand_variant)of_id->data;
365}
366
/*
 * Program the CS0 timing registers (NDTR0/NDTR1) from the flash timing
 * table entry @t, converting each nanosecond figure into controller clock
 * cycles at the current NAND clock rate. The computed values are also
 * cached in @info so nand_cmdfunc() can re-load them on a chip-select
 * switch without recomputing.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* cache for later restore on chip-select change */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
390
6a3e4865
EG
391/*
392 * Set the data and OOB size, depending on the selected
393 * spare and ECC configuration.
394 * Only applicable to READ0, READOOB and PAGEPROG commands.
395 */
fa543bef
EG
396static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
397 struct mtd_info *mtd)
fe69af00 398{
48cf7efa 399 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
9d8b1043 400
fa543bef 401 info->data_size = mtd->writesize;
43bcfd2b 402 if (!oob_enable)
9d8b1043 403 return;
9d8b1043 404
43bcfd2b
EG
405 info->oob_size = info->spare_size;
406 if (!info->use_ecc)
407 info->oob_size += info->ecc_size;
18c81b18
LW
408}
409
f8155a40
LW
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	/* Start from the cached NDCR and fold in the per-command options. */
	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		/* NDECCCTRL selects BCH (1) vs. Hamming (0) ECC engine */
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}
449
/*
 * Stop the controller state machine: poll for ND_RUN to self-clear and,
 * if it does not, force it off. Always clears pending status bits.
 * Called after a command timeout so the next command starts clean.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	/*
	 * NOTE(review): NAND_STOP_DELAY is msecs_to_jiffies(40) but is used
	 * here as an iteration count with udelay(1) — the actual wait is
	 * HZ-dependent, not 40 ms. Looks like a long-standing quirk; confirm
	 * before changing.
	 */
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		/* Controller did not stop on its own: clear ND_RUN by hand. */
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
469
57ff88f0
EG
/*
 * Enable the interrupts selected by @int_mask. The NDCR interrupt bits
 * are *mask* bits (1 = masked/disabled), so enabling means clearing them.
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}
478
/*
 * Disable the interrupts selected by @int_mask by setting the
 * corresponding NDCR mask bits (1 = masked/disabled).
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
486
8dad0386
MR
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into @data.
 * With BCH ECC enabled the FIFO must be drained in bursts of eight words,
 * re-checking NDSR.RDDREQ between bursts (hardware requirement); without
 * BCH a single bulk read suffices.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* advance by 8 words = 32 bytes */
			data += 32;
			len -= 8;
		}
	}

	/* Final (or only) transfer: remaining words, no RDDREQ polling. */
	readsl(info->mmio_base + NDDB, data, len);
}
519
/*
 * Transfer one chunk of data (and its OOB bytes, if any) between the
 * driver buffers and the controller FIFO by PIO. Direction is taken from
 * info->state (STATE_PIO_READING / STATE_PIO_WRITING); any other state is
 * a driver bug. Buffer cursors and the remaining data_size are advanced
 * afterwards so multi-chunk pages can be handled by repeated calls.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* A page larger than chunk_size is transferred one chunk at a time. */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
556
#ifdef ARCH_HAS_DMA
/*
 * Kick off a PXA DMA transfer between data_buff and the controller FIFO
 * (NDDB). Direction comes from info->state (STATE_DMA_READING/WRITING);
 * completion is reported via pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* DMA length covers data + OOB, rounded up to the 32-byte burst size */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> FIFO: increment source, target is the NDDB register */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* FIFO -> memory: increment target, source is the NDDB register */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}

/*
 * DMA completion callback: record bus errors, re-enable controller
 * interrupts and acknowledge the data-request status bits.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* read and acknowledge the channel status */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
#else
/* No DMA support on this architecture: PIO is used unconditionally. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
#endif
fe69af00 608
24542257
RJ
/*
 * Threaded half of the NAND interrupt (woken by IRQ_WAKE_THREAD from
 * pxa3xx_nand_irq): performs the PIO data transfer outside hard-IRQ
 * context, then acknowledges the data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
620
/*
 * Hard-IRQ handler driving the command state machine: records ECC results,
 * dispatches data transfers (DMA directly, PIO via the threaded handler),
 * loads the command buffer on WRCMDREQ, and signals the cmd_complete /
 * dev_ready completions that nand_cmdfunc() waits on.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Ready / command-done live in different NDSR bits per chip select. */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only NFCv2 with BCH reports a real error count in NDSR. */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			/* PIO transfer is done in pxa3xx_nand_irq_thread() */
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		/* ack WRCMDREQ now; it must not be re-written below */
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
712
fe69af00 713static inline int is_buf_blank(uint8_t *buf, size_t len)
714{
715 for (; len > 0; len--)
716 if (*buf++ != 0xff)
717 return 0;
718 return 1;
719}
720
86beebae
EG
721static void set_command_address(struct pxa3xx_nand_info *info,
722 unsigned int page_size, uint16_t column, int page_addr)
723{
724 /* small page addr setting */
725 if (page_size < PAGE_CHUNK_SIZE) {
726 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
727 | (column & 0xFF);
728
729 info->ndcb2 = 0;
730 } else {
731 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
732 | (column & 0xFFFF);
733
734 if (page_addr & 0xFF0000)
735 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
736 else
737 info->ndcb2 = 0;
738 }
739}
740
/*
 * Reset the per-command bookkeeping in @info before @command is encoded
 * by prepare_set_command(): clears buffer cursors, ECC/spare options and
 * the previous result, then applies per-command defaults.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through - READ0/PAGEPROG also need the data size set */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* ONFI/JEDEC parameter page is read without the spare area */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
788
/*
 * Encode @command into the NDCB0..NDCB3 command-buffer values stored in
 * @info, to be loaded into the controller by the IRQ handler on WRCMDREQ.
 *
 * @ext_cmd_type selects the NFCv2 'extended command type' for chunked
 * (multi-chunk page) reads and writes; it is 0 for the legacy
 * single-command path. @column/@page_addr address the target page.
 *
 * Returns 1 when the encoded command must actually be started on the
 * controller, 0 when nothing is to be executed (SEQIN on small pages,
 * ERASE2, blank page program, unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* chip select 1 is flagged in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only records the address; PAGEPROG issues the transfer */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* READOOB is READ0 with the caller's window shifted past the page */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the overridden transfer length */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Writing an all-0xff page would be a no-op: skip it. */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* legacy path: SEQIN + PAGEPROG as one double-byte command */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1 and ERASE2 are issued as a single double-byte command */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already folded into ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
963
5cbbdc6a
EG
/*
 * Legacy (non-chunked) mtd cmdfunc implementation: prepares and encodes
 * @command, starts the controller and waits for the command-done
 * completion signalled by the IRQ handler. Used when the page fits in a
 * single controller chunk; larger pages use nand_cmdfunc_extended().
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	/* ext_cmd_type is always 0 on the legacy path */
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1010
5cbbdc6a
EG
1011static void nand_cmdfunc_extended(struct mtd_info *mtd,
1012 const unsigned command,
1013 int column, int page_addr)
70ed8523
EG
1014{
1015 struct pxa3xx_nand_host *host = mtd->priv;
1016 struct pxa3xx_nand_info *info = host->info_data;
e5860c18 1017 int exec_cmd, ext_cmd_type;
70ed8523
EG
1018
1019 /*
1020 * if this is a x16 device then convert the input
1021 * "byte" address into a "word" address appropriate
1022 * for indexing a word-oriented device
1023 */
1024 if (info->reg_ndcr & NDCR_DWIDTH_M)
1025 column /= 2;
1026
1027 /*
1028 * There may be different NAND chip hooked to
1029 * different chip select, so check whether
1030 * chip select has been changed, if yes, reset the timing
1031 */
1032 if (info->cs != host->cs) {
1033 info->cs = host->cs;
1034 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1035 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1036 }
1037
1038 /* Select the extended command for the first command */
1039 switch (command) {
1040 case NAND_CMD_READ0:
1041 case NAND_CMD_READOOB:
1042 ext_cmd_type = EXT_CMD_TYPE_MONO;
1043 break;
535cb57a
EG
1044 case NAND_CMD_SEQIN:
1045 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1046 break;
1047 case NAND_CMD_PAGEPROG:
1048 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1049 break;
70ed8523
EG
1050 default:
1051 ext_cmd_type = 0;
535cb57a 1052 break;
70ed8523
EG
1053 }
1054
1055 prepare_start_command(info, command);
1056
1057 /*
1058 * Prepare the "is ready" completion before starting a command
1059 * transaction sequence. If the command is not executed the
1060 * completion will be completed, see below.
1061 *
1062 * We can do that inside the loop because the command variable
1063 * is invariant and thus so is the exec_cmd.
1064 */
1065 info->need_wait = 1;
1066 init_completion(&info->dev_ready);
1067 do {
1068 info->state = STATE_PREPARED;
1069 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1070 column, page_addr);
1071 if (!exec_cmd) {
1072 info->need_wait = 0;
1073 complete(&info->dev_ready);
1074 break;
1075 }
1076
1077 init_completion(&info->cmd_complete);
1078 pxa3xx_nand_start(info);
1079
e5860c18
NMG
1080 if (!wait_for_completion_timeout(&info->cmd_complete,
1081 CHIP_DELAY_TIMEOUT)) {
70ed8523
EG
1082 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1083 /* Stop State Machine for next command cycle */
1084 pxa3xx_nand_stop(info);
1085 break;
1086 }
1087
1088 /* Check if the sequence is complete */
535cb57a
EG
1089 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1090 break;
1091
1092 /*
1093 * After a splitted program command sequence has issued
1094 * the command dispatch, the command sequence is complete.
1095 */
1096 if (info->data_size == 0 &&
1097 command == NAND_CMD_PAGEPROG &&
1098 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
70ed8523
EG
1099 break;
1100
1101 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1102 /* Last read: issue a 'last naked read' */
1103 if (info->data_size == info->chunk_size)
1104 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1105 else
1106 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
535cb57a
EG
1107
1108 /*
1109 * If a splitted program command has no more data to transfer,
1110 * the command dispatch must be issued to complete.
1111 */
1112 } else if (command == NAND_CMD_PAGEPROG &&
1113 info->data_size == 0) {
1114 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
70ed8523
EG
1115 }
1116 } while (1);
1117
1118 info->state = STATE_IDLE;
1119}
1120
fdbad98d 1121static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1fbb938d 1122 struct nand_chip *chip, const uint8_t *buf, int oob_required)
f8155a40
LW
1123{
1124 chip->write_buf(mtd, buf, mtd->writesize);
1125 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
fdbad98d
JW
1126
1127 return 0;
f8155a40
LW
1128}
1129
1130static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1fbb938d
BN
1131 struct nand_chip *chip, uint8_t *buf, int oob_required,
1132 int page)
f8155a40 1133{
d456882b
LW
1134 struct pxa3xx_nand_host *host = mtd->priv;
1135 struct pxa3xx_nand_info *info = host->info_data;
f8155a40
LW
1136
1137 chip->read_buf(mtd, buf, mtd->writesize);
1138 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1139
87f5336e
EG
1140 if (info->retcode == ERR_CORERR && info->use_ecc) {
1141 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1142
1143 } else if (info->retcode == ERR_UNCORERR) {
f8155a40
LW
1144 /*
1145 * for blank page (all 0xff), HW will calculate its ECC as
1146 * 0, which is different from the ECC information within
87f5336e 1147 * OOB, ignore such uncorrectable errors
f8155a40
LW
1148 */
1149 if (is_buf_blank(buf, mtd->writesize))
543e32d5
DM
1150 info->retcode = ERR_NONE;
1151 else
f8155a40 1152 mtd->ecc_stats.failed++;
fe69af00 1153 }
f8155a40 1154
87f5336e 1155 return info->max_bitflips;
fe69af00 1156}
1157
1158static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1159{
d456882b
LW
1160 struct pxa3xx_nand_host *host = mtd->priv;
1161 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1162 char retval = 0xFF;
1163
1164 if (info->buf_start < info->buf_count)
1165 /* Has just send a new command? */
1166 retval = info->data_buff[info->buf_start++];
1167
1168 return retval;
1169}
1170
1171static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1172{
d456882b
LW
1173 struct pxa3xx_nand_host *host = mtd->priv;
1174 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1175 u16 retval = 0xFFFF;
1176
1177 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1178 retval = *((u16 *)(info->data_buff+info->buf_start));
1179 info->buf_start += 2;
1180 }
1181 return retval;
1182}
1183
1184static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1185{
d456882b
LW
1186 struct pxa3xx_nand_host *host = mtd->priv;
1187 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1188 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1189
1190 memcpy(buf, info->data_buff + info->buf_start, real_len);
1191 info->buf_start += real_len;
1192}
1193
1194static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1195 const uint8_t *buf, int len)
1196{
d456882b
LW
1197 struct pxa3xx_nand_host *host = mtd->priv;
1198 struct pxa3xx_nand_info *info = host->info_data;
fe69af00 1199 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1200
1201 memcpy(info->data_buff + info->buf_start, buf, real_len);
1202 info->buf_start += real_len;
1203}
1204
/*
 * No-op: chip (re)selection is handled in the cmdfunc path by reloading
 * the per-CS timing registers when info->cs changes.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1209
1210static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1211{
d456882b
LW
1212 struct pxa3xx_nand_host *host = mtd->priv;
1213 struct pxa3xx_nand_info *info = host->info_data;
55d9fd6e
EG
1214
1215 if (info->need_wait) {
55d9fd6e 1216 info->need_wait = 0;
e5860c18
NMG
1217 if (!wait_for_completion_timeout(&info->dev_ready,
1218 CHIP_DELAY_TIMEOUT)) {
55d9fd6e
EG
1219 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1220 return NAND_STATUS_FAIL;
1221 }
1222 }
fe69af00 1223
1224 /* pxa3xx_nand_send_command has waited for command complete */
1225 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1226 if (info->retcode == ERR_NONE)
1227 return 0;
55d9fd6e
EG
1228 else
1229 return NAND_STATUS_FAIL;
fe69af00 1230 }
1231
55d9fd6e 1232 return NAND_STATUS_READY;
fe69af00 1233}
1234
fe69af00 1235static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
c8c17c88 1236 const struct pxa3xx_nand_flash *f)
fe69af00 1237{
1238 struct platform_device *pdev = info->pdev;
453810b7 1239 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
f3c8cfc2 1240 struct pxa3xx_nand_host *host = info->host[info->cs];
f8155a40 1241 uint32_t ndcr = 0x0; /* enable all interrupts */
fe69af00 1242
da675b4e
LW
1243 if (f->page_size != 2048 && f->page_size != 512) {
1244 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
fe69af00 1245 return -EINVAL;
da675b4e 1246 }
fe69af00 1247
da675b4e
LW
1248 if (f->flash_width != 16 && f->flash_width != 8) {
1249 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
fe69af00 1250 return -EINVAL;
da675b4e 1251 }
fe69af00 1252
1253 /* calculate flash information */
d456882b 1254 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
fe69af00 1255
1256 /* calculate addressing information */
d456882b 1257 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
fe69af00 1258
1259 if (f->num_blocks * f->page_per_block > 65536)
d456882b 1260 host->row_addr_cycles = 3;
fe69af00 1261 else
d456882b 1262 host->row_addr_cycles = 2;
fe69af00 1263
1264 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
d456882b 1265 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
fe69af00 1266 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1267 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1268 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1269 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1270
d456882b 1271 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
fe69af00 1272 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1273
48cf7efa 1274 info->reg_ndcr = ndcr;
fe69af00 1275
d456882b 1276 pxa3xx_nand_set_timing(host, f->timing);
fe69af00 1277 return 0;
1278}
1279
f271049e
MR
1280static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1281{
f3c8cfc2
LW
1282 /*
1283 * We set 0 by hard coding here, for we don't support keep_config
1284 * when there is more than one chip attached to the controller
1285 */
1286 struct pxa3xx_nand_host *host = info->host[0];
f271049e 1287 uint32_t ndcr = nand_readl(info, NDCR);
f271049e 1288
d456882b 1289 if (ndcr & NDCR_PAGE_SZ) {
2128b08c 1290 /* Controller's FIFO size */
70ed8523 1291 info->chunk_size = 2048;
d456882b
LW
1292 host->read_id_bytes = 4;
1293 } else {
70ed8523 1294 info->chunk_size = 512;
d456882b
LW
1295 host->read_id_bytes = 2;
1296 }
1297
70ed8523 1298 /* Set an initial chunk size */
48cf7efa
EG
1299 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1300 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1301 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
f271049e
MR
1302 return 0;
1303}
1304
f4db2e3a 1305#ifdef ARCH_HAS_DMA
fe69af00 1306static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1307{
1308 struct platform_device *pdev = info->pdev;
62e8b851 1309 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
fe69af00 1310
1311 if (use_dma == 0) {
62e8b851 1312 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
fe69af00 1313 if (info->data_buff == NULL)
1314 return -ENOMEM;
1315 return 0;
1316 }
1317
62e8b851 1318 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
fe69af00 1319 &info->data_buff_phys, GFP_KERNEL);
1320 if (info->data_buff == NULL) {
1321 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1322 return -ENOMEM;
1323 }
1324
fe69af00 1325 info->data_desc = (void *)info->data_buff + data_desc_offset;
1326 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1327
1328 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1329 pxa3xx_nand_data_dma_irq, info);
1330 if (info->data_dma_ch < 0) {
1331 dev_err(&pdev->dev, "failed to request data dma\n");
62e8b851 1332 dma_free_coherent(&pdev->dev, info->buf_size,
fe69af00 1333 info->data_buff, info->data_buff_phys);
1334 return info->data_dma_ch;
1335 }
1336
95b26563
EG
1337 /*
1338 * Now that DMA buffers are allocated we turn on
1339 * DMA proper for I/O operations.
1340 */
1341 info->use_dma = 1;
fe69af00 1342 return 0;
1343}
1344
498b6145
EG
1345static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1346{
1347 struct platform_device *pdev = info->pdev;
15b540c7 1348 if (info->use_dma) {
498b6145 1349 pxa_free_dma(info->data_dma_ch);
62e8b851 1350 dma_free_coherent(&pdev->dev, info->buf_size,
498b6145
EG
1351 info->data_buff, info->data_buff_phys);
1352 } else {
1353 kfree(info->data_buff);
1354 }
1355}
f4db2e3a
EG
1356#else
1357static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1358{
62e8b851 1359 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
f4db2e3a
EG
1360 if (info->data_buff == NULL)
1361 return -ENOMEM;
1362 return 0;
1363}
1364
1365static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1366{
1367 kfree(info->data_buff);
1368}
1369#endif
498b6145 1370
401e67e2
LW
1371static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1372{
f3c8cfc2 1373 struct mtd_info *mtd;
2d79ab16 1374 struct nand_chip *chip;
d456882b 1375 int ret;
2d79ab16 1376
f3c8cfc2 1377 mtd = info->host[info->cs]->mtd;
2d79ab16
EG
1378 chip = mtd->priv;
1379
401e67e2 1380 /* use the common timing to make a try */
d456882b
LW
1381 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1382 if (ret)
1383 return ret;
1384
2d79ab16 1385 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
56704d85
EG
1386 ret = chip->waitfunc(mtd, chip);
1387 if (ret & NAND_STATUS_FAIL)
1388 return -ENODEV;
d456882b 1389
56704d85 1390 return 0;
401e67e2 1391}
fe69af00 1392
43bcfd2b
EG
1393static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1394 struct nand_ecc_ctrl *ecc,
30b2afc8 1395 int strength, int ecc_stepsize, int page_size)
43bcfd2b 1396{
30b2afc8 1397 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
70ed8523 1398 info->chunk_size = 2048;
43bcfd2b
EG
1399 info->spare_size = 40;
1400 info->ecc_size = 24;
1401 ecc->mode = NAND_ECC_HW;
1402 ecc->size = 512;
1403 ecc->strength = 1;
43bcfd2b 1404
30b2afc8 1405 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
70ed8523 1406 info->chunk_size = 512;
43bcfd2b
EG
1407 info->spare_size = 8;
1408 info->ecc_size = 8;
1409 ecc->mode = NAND_ECC_HW;
1410 ecc->size = 512;
1411 ecc->strength = 1;
43bcfd2b 1412
6033a949
BN
1413 /*
1414 * Required ECC: 4-bit correction per 512 bytes
1415 * Select: 16-bit correction per 2048 bytes
1416 */
3db227b6
RG
1417 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1418 info->ecc_bch = 1;
1419 info->chunk_size = 2048;
1420 info->spare_size = 32;
1421 info->ecc_size = 32;
1422 ecc->mode = NAND_ECC_HW;
1423 ecc->size = info->chunk_size;
1424 ecc->layout = &ecc_layout_2KB_bch4bit;
1425 ecc->strength = 16;
3db227b6 1426
30b2afc8 1427 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1428 info->ecc_bch = 1;
1429 info->chunk_size = 2048;
1430 info->spare_size = 32;
1431 info->ecc_size = 32;
1432 ecc->mode = NAND_ECC_HW;
1433 ecc->size = info->chunk_size;
1434 ecc->layout = &ecc_layout_4KB_bch4bit;
1435 ecc->strength = 16;
70ed8523 1436
6033a949
BN
1437 /*
1438 * Required ECC: 8-bit correction per 512 bytes
1439 * Select: 16-bit correction per 1024 bytes
1440 */
1441 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
70ed8523
EG
1442 info->ecc_bch = 1;
1443 info->chunk_size = 1024;
1444 info->spare_size = 0;
1445 info->ecc_size = 32;
1446 ecc->mode = NAND_ECC_HW;
1447 ecc->size = info->chunk_size;
1448 ecc->layout = &ecc_layout_4KB_bch8bit;
1449 ecc->strength = 16;
eee0166d
EG
1450 } else {
1451 dev_err(&info->pdev->dev,
1452 "ECC strength %d at page size %d is not supported\n",
1453 strength, page_size);
1454 return -ENODEV;
70ed8523 1455 }
eee0166d
EG
1456
1457 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1458 ecc->strength, ecc->size);
43bcfd2b
EG
1459 return 0;
1460}
1461
401e67e2 1462static int pxa3xx_nand_scan(struct mtd_info *mtd)
fe69af00 1463{
d456882b
LW
1464 struct pxa3xx_nand_host *host = mtd->priv;
1465 struct pxa3xx_nand_info *info = host->info_data;
401e67e2 1466 struct platform_device *pdev = info->pdev;
453810b7 1467 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
0fab028b 1468 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
401e67e2
LW
1469 const struct pxa3xx_nand_flash *f = NULL;
1470 struct nand_chip *chip = mtd->priv;
1471 uint32_t id = -1;
4332c116 1472 uint64_t chipsize;
401e67e2 1473 int i, ret, num;
30b2afc8 1474 uint16_t ecc_strength, ecc_step;
401e67e2
LW
1475
1476 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
4332c116 1477 goto KEEP_CONFIG;
401e67e2 1478
bc3e00f0
AT
1479 /* Set a default chunk size */
1480 info->chunk_size = 512;
1481
401e67e2 1482 ret = pxa3xx_nand_sensing(info);
d456882b 1483 if (ret) {
f3c8cfc2
LW
1484 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
1485 info->cs);
401e67e2 1486
d456882b 1487 return ret;
401e67e2
LW
1488 }
1489
1490 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1491 id = *((uint16_t *)(info->data_buff));
1492 if (id != 0)
da675b4e 1493 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
401e67e2 1494 else {
da675b4e
LW
1495 dev_warn(&info->pdev->dev,
1496 "Read out ID 0, potential timing set wrong!!\n");
401e67e2
LW
1497
1498 return -EINVAL;
1499 }
1500
1501 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1502 for (i = 0; i < num; i++) {
1503 if (i < pdata->num_flash)
1504 f = pdata->flash + i;
1505 else
1506 f = &builtin_flash_types[i - pdata->num_flash + 1];
1507
1508 /* find the chip in default list */
4332c116 1509 if (f->chip_id == id)
401e67e2 1510 break;
401e67e2
LW
1511 }
1512
4332c116 1513 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
da675b4e 1514 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
401e67e2
LW
1515
1516 return -EINVAL;
1517 }
1518
d456882b
LW
1519 ret = pxa3xx_nand_config_flash(info, f);
1520 if (ret) {
1521 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
1522 return ret;
1523 }
1524
7c2f7176
AT
1525 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1526
4332c116 1527 pxa3xx_flash_ids[0].name = f->name;
68aa352d 1528 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
4332c116
LW
1529 pxa3xx_flash_ids[0].pagesize = f->page_size;
1530 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1531 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1532 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1533 if (f->flash_width == 16)
1534 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
0fab028b
LW
1535 pxa3xx_flash_ids[1].name = NULL;
1536 def = pxa3xx_flash_ids;
4332c116 1537KEEP_CONFIG:
48cf7efa 1538 if (info->reg_ndcr & NDCR_DWIDTH_M)
d456882b
LW
1539 chip->options |= NAND_BUSWIDTH_16;
1540
43bcfd2b
EG
1541 /* Device detection must be done with ECC disabled */
1542 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1543 nand_writel(info, NDECCCTRL, 0x0);
1544
0fab028b 1545 if (nand_scan_ident(mtd, 1, def))
4332c116 1546 return -ENODEV;
776f265e
EG
1547
1548 if (pdata->flash_bbt) {
1549 /*
1550 * We'll use a bad block table stored in-flash and don't
1551 * allow writing the bad block marker to the flash.
1552 */
1553 chip->bbt_options |= NAND_BBT_USE_FLASH |
1554 NAND_BBT_NO_OOB_BBM;
1555 chip->bbt_td = &bbt_main_descr;
1556 chip->bbt_md = &bbt_mirror_descr;
1557 }
1558
5cbbdc6a
EG
1559 /*
1560 * If the page size is bigger than the FIFO size, let's check
1561 * we are given the right variant and then switch to the extended
1562 * (aka splitted) command handling,
1563 */
1564 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1565 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1566 chip->cmdfunc = nand_cmdfunc_extended;
1567 } else {
1568 dev_err(&info->pdev->dev,
1569 "unsupported page size on this variant\n");
1570 return -ENODEV;
1571 }
1572 }
1573
5b3e5078
EG
1574 if (pdata->ecc_strength && pdata->ecc_step_size) {
1575 ecc_strength = pdata->ecc_strength;
1576 ecc_step = pdata->ecc_step_size;
1577 } else {
1578 ecc_strength = chip->ecc_strength_ds;
1579 ecc_step = chip->ecc_step_ds;
1580 }
30b2afc8
EG
1581
1582 /* Set default ECC strength requirements on non-ONFI devices */
1583 if (ecc_strength < 1 && ecc_step < 1) {
1584 ecc_strength = 1;
1585 ecc_step = 512;
1586 }
1587
1588 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1589 ecc_step, mtd->writesize);
eee0166d
EG
1590 if (ret)
1591 return ret;
43bcfd2b 1592
4332c116 1593 /* calculate addressing information */
d456882b
LW
1594 if (mtd->writesize >= 2048)
1595 host->col_addr_cycles = 2;
1596 else
1597 host->col_addr_cycles = 1;
1598
62e8b851
EG
1599 /* release the initial buffer */
1600 kfree(info->data_buff);
1601
1602 /* allocate the real data + oob buffer */
1603 info->buf_size = mtd->writesize + mtd->oobsize;
1604 ret = pxa3xx_nand_init_buff(info);
1605 if (ret)
1606 return ret;
4332c116 1607 info->oob_buff = info->data_buff + mtd->writesize;
62e8b851 1608
4332c116 1609 if ((mtd->size >> chip->page_shift) > 65536)
d456882b 1610 host->row_addr_cycles = 3;
4332c116 1611 else
d456882b 1612 host->row_addr_cycles = 2;
401e67e2 1613 return nand_scan_tail(mtd);
fe69af00 1614}
1615
d456882b 1616static int alloc_nand_resource(struct platform_device *pdev)
fe69af00 1617{
f3c8cfc2 1618 struct pxa3xx_nand_platform_data *pdata;
fe69af00 1619 struct pxa3xx_nand_info *info;
d456882b 1620 struct pxa3xx_nand_host *host;
6e308f87 1621 struct nand_chip *chip = NULL;
fe69af00 1622 struct mtd_info *mtd;
1623 struct resource *r;
f3c8cfc2 1624 int ret, irq, cs;
fe69af00 1625
453810b7 1626 pdata = dev_get_platdata(&pdev->dev);
e423c90a
RJ
1627 if (pdata->num_cs <= 0)
1628 return -ENODEV;
4c073cd2
EG
1629 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1630 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1631 if (!info)
d456882b 1632 return -ENOMEM;
fe69af00 1633
fe69af00 1634 info->pdev = pdev;
c7e9c7e7 1635 info->variant = pxa3xx_nand_get_variant(pdev);
f3c8cfc2 1636 for (cs = 0; cs < pdata->num_cs; cs++) {
ce914e6b 1637 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
f3c8cfc2
LW
1638 chip = (struct nand_chip *)(&mtd[1]);
1639 host = (struct pxa3xx_nand_host *)chip;
1640 info->host[cs] = host;
1641 host->mtd = mtd;
1642 host->cs = cs;
1643 host->info_data = info;
1644 mtd->priv = host;
1645 mtd->owner = THIS_MODULE;
1646
1647 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1648 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1649 chip->controller = &info->controller;
1650 chip->waitfunc = pxa3xx_nand_waitfunc;
1651 chip->select_chip = pxa3xx_nand_select_chip;
f3c8cfc2
LW
1652 chip->read_word = pxa3xx_nand_read_word;
1653 chip->read_byte = pxa3xx_nand_read_byte;
1654 chip->read_buf = pxa3xx_nand_read_buf;
1655 chip->write_buf = pxa3xx_nand_write_buf;
664c7f5e 1656 chip->options |= NAND_NO_SUBPAGE_WRITE;
5cbbdc6a 1657 chip->cmdfunc = nand_cmdfunc;
f3c8cfc2 1658 }
401e67e2
LW
1659
1660 spin_lock_init(&chip->controller->lock);
1661 init_waitqueue_head(&chip->controller->wq);
9ca7944d 1662 info->clk = devm_clk_get(&pdev->dev, NULL);
fe69af00 1663 if (IS_ERR(info->clk)) {
1664 dev_err(&pdev->dev, "failed to get nand clock\n");
4c073cd2 1665 return PTR_ERR(info->clk);
fe69af00 1666 }
1f8eaff2
EG
1667 ret = clk_prepare_enable(info->clk);
1668 if (ret < 0)
1669 return ret;
fe69af00 1670
6b45c1ee
EG
1671 if (use_dma) {
1672 /*
1673 * This is a dirty hack to make this driver work from
1674 * devicetree bindings. It can be removed once we have
1675 * a prober DMA controller framework for DT.
1676 */
1677 if (pdev->dev.of_node &&
1678 of_machine_is_compatible("marvell,pxa3xx")) {
1679 info->drcmr_dat = 97;
1680 info->drcmr_cmd = 99;
1681 } else {
1682 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1683 if (r == NULL) {
1684 dev_err(&pdev->dev,
1685 "no resource defined for data DMA\n");
1686 ret = -ENXIO;
1687 goto fail_disable_clk;
1688 }
1689 info->drcmr_dat = r->start;
1690
1691 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1692 if (r == NULL) {
1693 dev_err(&pdev->dev,
1694 "no resource defined for cmd DMA\n");
1695 ret = -ENXIO;
1696 goto fail_disable_clk;
1697 }
1698 info->drcmr_cmd = r->start;
1e7ba630 1699 }
fe69af00 1700 }
fe69af00 1701
1702 irq = platform_get_irq(pdev, 0);
1703 if (irq < 0) {
1704 dev_err(&pdev->dev, "no IRQ resource defined\n");
1705 ret = -ENXIO;
9ca7944d 1706 goto fail_disable_clk;
fe69af00 1707 }
1708
1709 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0ddd846f
EG
1710 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1711 if (IS_ERR(info->mmio_base)) {
1712 ret = PTR_ERR(info->mmio_base);
9ca7944d 1713 goto fail_disable_clk;
fe69af00 1714 }
8638fac8 1715 info->mmio_phys = r->start;
fe69af00 1716
62e8b851
EG
1717 /* Allocate a buffer to allow flash detection */
1718 info->buf_size = INIT_BUFFER_SIZE;
1719 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1720 if (info->data_buff == NULL) {
1721 ret = -ENOMEM;
9ca7944d 1722 goto fail_disable_clk;
62e8b851 1723 }
fe69af00 1724
346e1259
HZ
1725 /* initialize all interrupts to be disabled */
1726 disable_int(info, NDSR_MASK);
1727
24542257
RJ
1728 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1729 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1730 pdev->name, info);
fe69af00 1731 if (ret < 0) {
1732 dev_err(&pdev->dev, "failed to request IRQ\n");
1733 goto fail_free_buf;
1734 }
1735
e353a20a 1736 platform_set_drvdata(pdev, info);
fe69af00 1737
d456882b 1738 return 0;
fe69af00 1739
fe69af00 1740fail_free_buf:
401e67e2 1741 free_irq(irq, info);
62e8b851 1742 kfree(info->data_buff);
9ca7944d 1743fail_disable_clk:
fb32061f 1744 clk_disable_unprepare(info->clk);
d456882b 1745 return ret;
fe69af00 1746}
1747
1748static int pxa3xx_nand_remove(struct platform_device *pdev)
1749{
e353a20a 1750 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2 1751 struct pxa3xx_nand_platform_data *pdata;
f3c8cfc2 1752 int irq, cs;
fe69af00 1753
d456882b
LW
1754 if (!info)
1755 return 0;
1756
453810b7 1757 pdata = dev_get_platdata(&pdev->dev);
fe69af00 1758
dbf5986a
HZ
1759 irq = platform_get_irq(pdev, 0);
1760 if (irq >= 0)
1761 free_irq(irq, info);
498b6145 1762 pxa3xx_nand_free_buff(info);
82a72d10 1763
fb32061f 1764 clk_disable_unprepare(info->clk);
82a72d10 1765
f3c8cfc2
LW
1766 for (cs = 0; cs < pdata->num_cs; cs++)
1767 nand_release(info->host[cs]->mtd);
fe69af00 1768 return 0;
1769}
1770
1e7ba630
DM
1771static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1772{
1773 struct pxa3xx_nand_platform_data *pdata;
1774 struct device_node *np = pdev->dev.of_node;
1775 const struct of_device_id *of_id =
1776 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1777
1778 if (!of_id)
1779 return 0;
1780
1781 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1782 if (!pdata)
1783 return -ENOMEM;
1784
1785 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1786 pdata->enable_arbiter = 1;
1787 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1788 pdata->keep_config = 1;
1789 of_property_read_u32(np, "num-cs", &pdata->num_cs);
776f265e 1790 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1e7ba630 1791
5b3e5078
EG
1792 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1793 if (pdata->ecc_strength < 0)
1794 pdata->ecc_strength = 0;
1795
1796 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1797 if (pdata->ecc_step_size < 0)
1798 pdata->ecc_step_size = 0;
1799
1e7ba630
DM
1800 pdev->dev.platform_data = pdata;
1801
1802 return 0;
1803}
1e7ba630 1804
e353a20a
LW
1805static int pxa3xx_nand_probe(struct platform_device *pdev)
1806{
1807 struct pxa3xx_nand_platform_data *pdata;
1e7ba630 1808 struct mtd_part_parser_data ppdata = {};
e353a20a 1809 struct pxa3xx_nand_info *info;
f3c8cfc2 1810 int ret, cs, probe_success;
e353a20a 1811
f4db2e3a
EG
1812#ifndef ARCH_HAS_DMA
1813 if (use_dma) {
1814 use_dma = 0;
1815 dev_warn(&pdev->dev,
1816 "This platform can't do DMA on this device\n");
1817 }
1818#endif
1e7ba630
DM
1819 ret = pxa3xx_nand_probe_dt(pdev);
1820 if (ret)
1821 return ret;
1822
453810b7 1823 pdata = dev_get_platdata(&pdev->dev);
e353a20a
LW
1824 if (!pdata) {
1825 dev_err(&pdev->dev, "no platform data defined\n");
1826 return -ENODEV;
1827 }
1828
d456882b
LW
1829 ret = alloc_nand_resource(pdev);
1830 if (ret) {
1831 dev_err(&pdev->dev, "alloc nand resource failed\n");
1832 return ret;
1833 }
e353a20a 1834
d456882b 1835 info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1836 probe_success = 0;
1837 for (cs = 0; cs < pdata->num_cs; cs++) {
b7655bcb 1838 struct mtd_info *mtd = info->host[cs]->mtd;
f455578d 1839
18a84e93
EG
1840 /*
1841 * The mtd name matches the one used in 'mtdparts' kernel
1842 * parameter. This name cannot be changed or otherwise
1843 * user's mtd partitions configuration would get broken.
1844 */
1845 mtd->name = "pxa3xx_nand-0";
f3c8cfc2 1846 info->cs = cs;
b7655bcb 1847 ret = pxa3xx_nand_scan(mtd);
f3c8cfc2
LW
1848 if (ret) {
1849 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1850 cs);
1851 continue;
1852 }
1853
1e7ba630 1854 ppdata.of_node = pdev->dev.of_node;
b7655bcb 1855 ret = mtd_device_parse_register(mtd, NULL,
1e7ba630 1856 &ppdata, pdata->parts[cs],
42d7fbe2 1857 pdata->nr_parts[cs]);
f3c8cfc2
LW
1858 if (!ret)
1859 probe_success = 1;
1860 }
1861
1862 if (!probe_success) {
e353a20a
LW
1863 pxa3xx_nand_remove(pdev);
1864 return -ENODEV;
1865 }
1866
f3c8cfc2 1867 return 0;
e353a20a
LW
1868}
1869
fe69af00 1870#ifdef CONFIG_PM
1871static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1872{
e353a20a 1873 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1874 struct pxa3xx_nand_platform_data *pdata;
1875 struct mtd_info *mtd;
1876 int cs;
fe69af00 1877
453810b7 1878 pdata = dev_get_platdata(&pdev->dev);
f8155a40 1879 if (info->state) {
fe69af00 1880 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1881 return -EAGAIN;
1882 }
1883
f3c8cfc2
LW
1884 for (cs = 0; cs < pdata->num_cs; cs++) {
1885 mtd = info->host[cs]->mtd;
3fe4bae8 1886 mtd_suspend(mtd);
f3c8cfc2
LW
1887 }
1888
fe69af00 1889 return 0;
1890}
1891
1892static int pxa3xx_nand_resume(struct platform_device *pdev)
1893{
e353a20a 1894 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
f3c8cfc2
LW
1895 struct pxa3xx_nand_platform_data *pdata;
1896 struct mtd_info *mtd;
1897 int cs;
051fc41c 1898
453810b7 1899 pdata = dev_get_platdata(&pdev->dev);
051fc41c
LW
1900 /* We don't want to handle interrupt without calling mtd routine */
1901 disable_int(info, NDCR_INT_MASK);
fe69af00 1902
f3c8cfc2
LW
1903 /*
1904 * Directly set the chip select to a invalid value,
1905 * then the driver would reset the timing according
1906 * to current chip select at the beginning of cmdfunc
1907 */
1908 info->cs = 0xff;
fe69af00 1909
051fc41c
LW
1910 /*
1911 * As the spec says, the NDSR would be updated to 0x1800 when
1912 * doing the nand_clk disable/enable.
1913 * To prevent it damaging state machine of the driver, clear
1914 * all status before resume
1915 */
1916 nand_writel(info, NDSR, NDSR_MASK);
f3c8cfc2
LW
1917 for (cs = 0; cs < pdata->num_cs; cs++) {
1918 mtd = info->host[cs]->mtd;
ead995f8 1919 mtd_resume(mtd);
f3c8cfc2
LW
1920 }
1921
18c81b18 1922 return 0;
fe69af00 1923}
1924#else
1925#define pxa3xx_nand_suspend NULL
1926#define pxa3xx_nand_resume NULL
1927#endif
1928
1929static struct platform_driver pxa3xx_nand_driver = {
1930 .driver = {
1931 .name = "pxa3xx-nand",
5576bc7b 1932 .of_match_table = pxa3xx_nand_dt_ids,
fe69af00 1933 },
1934 .probe = pxa3xx_nand_probe,
1935 .remove = pxa3xx_nand_remove,
1936 .suspend = pxa3xx_nand_suspend,
1937 .resume = pxa3xx_nand_resume,
1938};
1939
f99640de 1940module_platform_driver(pxa3xx_nand_driver);
fe69af00 1941
1942MODULE_LICENSE("GPL");
1943MODULE_DESCRIPTION("PXA3xx NAND controller driver");