include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[linux-block.git] / drivers / mtd / nand / omap2.c
/*
 * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
 * Copyright © 2004 Micron Technology Inc.
 * Copyright © 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>

#define	GPMC_IRQ_STATUS		0x18
#define	GPMC_ECC_CONFIG		0x1F4
#define	GPMC_ECC_CONTROL	0x1F8
#define	GPMC_ECC_SIZE_CONFIG	0x1FC
#define	GPMC_ECC1_RESULT	0x200

#define	DRIVER_NAME	"omap2-nand"

#define	NAND_WP_OFF	0
#define NAND_WP_BIT	0x00000010

#define GPMC_BUF_FULL	0x00000001
#define GPMC_BUF_EMPTY	0x00000000

#define NAND_Ecc_P1e		(1 << 0)
#define NAND_Ecc_P2e		(1 << 1)
#define NAND_Ecc_P4e		(1 << 2)
#define NAND_Ecc_P8e		(1 << 3)
#define NAND_Ecc_P16e		(1 << 4)
#define NAND_Ecc_P32e		(1 << 5)
#define NAND_Ecc_P64e		(1 << 6)
#define NAND_Ecc_P128e		(1 << 7)
#define NAND_Ecc_P256e		(1 << 8)
#define NAND_Ecc_P512e		(1 << 9)
#define NAND_Ecc_P1024e		(1 << 10)
#define NAND_Ecc_P2048e		(1 << 11)

#define NAND_Ecc_P1o		(1 << 16)
#define NAND_Ecc_P2o		(1 << 17)
#define NAND_Ecc_P4o		(1 << 18)
#define NAND_Ecc_P8o		(1 << 19)
#define NAND_Ecc_P16o		(1 << 20)
#define NAND_Ecc_P32o		(1 << 21)
#define NAND_Ecc_P64o		(1 << 22)
#define NAND_Ecc_P128o		(1 << 23)
#define NAND_Ecc_P256o		(1 << 24)
#define NAND_Ecc_P512o		(1 << 25)
#define NAND_Ecc_P1024o		(1 << 26)
#define NAND_Ecc_P2048o		(1 << 27)

#define TF(value)	(value ? 1 : 0)

#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)

#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)

#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)

#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)

#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH
static int use_prefetch = 1;

/* "modprobe ... use_prefetch=0" etc */
module_param(use_prefetch, bool, 0);
MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH");

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
static int use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
#else
const int use_dma;
#endif
#else
const int use_prefetch;
const int use_dma;
#endif

struct omap_nand_info {
	struct nand_hw_control		controller;
	struct omap_nand_platform_data	*pdata;
	struct mtd_info			mtd;
	struct mtd_partition		*parts;
	struct nand_chip		nand;
	struct platform_device		*pdev;

	int				gpmc_cs;
	unsigned long			phys_base;
	void __iomem			*gpmc_cs_baseaddr;
	void __iomem			*gpmc_baseaddr;
	void __iomem			*nand_pref_fifo_add;
	struct completion		comp;
	int				dma_ch;
};

/**
 * omap_nand_wp - This function enables or disables the Write Protect feature
 * @mtd: MTD device structure
 * @mode: WP ON/OFF
 */
static void omap_nand_wp(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);

	unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);

	if (mode)
		config &= ~(NAND_WP_BIT);	/* WP is ON */
	else
		config |= (NAND_WP_BIT);	/* WP is OFF */

	__raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
}

/**
 * omap_hwcontrol - hardware specific access to control-lines
 * @mtd: MTD device structure
 * @cmd: command to device
 * @ctrl:
 * NAND_NCE: bit 0 -> don't care
 * NAND_CLE: bit 1 -> Command Latch
 * NAND_ALE: bit 2 -> Address Latch
 *
 * NOTE: boards may use different bits for these!!
 */
static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	switch (ctrl) {
	case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_COMMAND;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;

	case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_ADDRESS;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;

	case NAND_CTRL_CHANGE | NAND_NCE:
		info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_DATA;
		break;
	}

	if (cmd != NAND_CMD_NONE)
		__raw_writeb(cmd, info->nand.IO_ADDR_W);
}

/**
 * omap_read_buf8 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread8_rep(nand->IO_ADDR_R, buf, len);
}

/**
 * omap_write_buf8 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u_char *p = (u_char *)buf;

	while (len--) {
		iowrite8(*p++, info->nand.IO_ADDR_W);
		while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
						GPMC_STATUS) & GPMC_BUF_FULL))
			;
	}
}

/**
 * omap_read_buf16 - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
{
	struct nand_chip *nand = mtd->priv;

	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
}

/**
 * omap_write_buf16 - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	u16 *p = (u16 *) buf;

	/* FIXME try bursts of writesw() or DMA ... */
	len >>= 1;

	while (len--) {
		iowrite16(*p++, info->nand.IO_ADDR_W);

		while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
						GPMC_STATUS) & GPMC_BUF_FULL))
			;
	}
}

/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pfpw_status = 0, r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads */
	for (; len % 4 != 0; ) {
		*buf++ = __raw_readb(info->nand.IO_ADDR_R);
		len--;
	}
	p = (u32 *) buf;

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len);
		else
			omap_read_buf8(mtd, buf, len);
	} else {
		do {
			pfpw_status = gpmc_prefetch_status();
			r_count = ((pfpw_status >> 24) & 0x7F) >> 2;
			ioread32_rep(info->nand_pref_fifo_add, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset();
	}
}

/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	struct omap_nand_info *info = container_of(mtd,
						struct omap_nand_info, mtd);
	uint32_t pfpw_status = 0, w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *) buf;

	/* take care of subpage writes */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.IO_ADDR_R);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, buf, len);
		else
			omap_write_buf8(mtd, buf, len);
	} else {
		pfpw_status = gpmc_prefetch_status();
		while (pfpw_status & 0x3FFF) {
			w_count = ((pfpw_status >> 24) & 0x7F) >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand_pref_fifo_add);
			pfpw_status = gpmc_prefetch_status();
		}

		/* disable and stop the PFPW engine */
		gpmc_prefetch_reset();
	}
}

#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA
/*
 * omap_nand_dma_cb: callback on the completion of dma transfer
 * @lch: logical channel
 * @ch_status: channel status
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *) data);
}

/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = container_of(mtd,
					struct omap_nand_info, mtd);
	uint32_t prefetch_status = 0;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	dma_addr_t dma_addr;
	int ret;

	/* The fifo depth is 64 bytes. We have a sync at each frame and frame
	 * length is 64 bytes.
	 */
	int buf_len = len >> 6;

	if (addr >= high_memory) {
		struct page *p1;

		if (((size_t)addr & PAGE_MASK) !=
			((size_t)(addr + len - 1) & PAGE_MASK))
			goto out_copy;
		p1 = vmalloc_to_page(addr);
		if (!p1)
			goto out_copy;
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
						info->phys_base, 0, 0);
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
						dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
					0x10, buf_len, OMAP_DMA_SYNC_FRAME,
					OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);

	while (0x3fff & (prefetch_status = gpmc_prefetch_status()))
		;
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset();

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	return 0;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
#else
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {}
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	return 0;
}
#endif

/**
 * omap_read_buf_dma_pref - read data from NAND controller into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 */
static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_read_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x0);
}

/**
 * omap_write_buf_dma_pref - write buffer to NAND controller
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 */
static void omap_write_buf_dma_pref(struct mtd_info *mtd,
					const u_char *buf, int len)
{
	if (len <= mtd->oobsize)
		omap_write_buf_pref(mtd, buf, len);
	else
		/* start transfer in DMA mode */
		omap_nand_dma_transfer(mtd, buf, len, 0x1);
}

/**
 * omap_verify_buf - Verify chip data against buffer
 * @mtd: MTD device structure
 * @buf: buffer containing the data to compare
 * @len: number of bytes to compare
 */
static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	u16 *p = (u16 *) buf;

	len >>= 1;
	while (len--) {
		if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
/**
 * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
 * @mtd: MTD device structure
 */
static void omap_hwecc_init(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned long val = 0x0;

	/* Read from ECC Control Register */
	val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
	/* Clear all ECC | Enable Reg1 */
	val = ((0x00000001<<8) | 0x00000001);
	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);

	/* Read from ECC Size Config Register */
	val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
	/* ECCSIZE1=512 | Select eccResultsize[0-3] */
	val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
}

/**
 * gen_true_ecc - This function will generate true ECC value
 * @ecc_buf: buffer to store ecc code
 *
 * This generated true ECC value can be used when correcting
 * data read from NAND flash memory core
 */
static void gen_true_ecc(u8 *ecc_buf)
{
	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);

	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
}

/**
 * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
 * @ecc_data1: ecc code from nand spare area
 * @ecc_data2: ecc code from hardware register obtained from hardware ecc
 * @page_data: page data
 *
 * This function compares two ECC's and indicates if there is an error.
 * If the error can be corrected it will be corrected in the buffer.
 */
static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
			    u8 *ecc_data2,	/* read from register */
			    u8 *page_data)
{
	uint	i;
	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
	u8	ecc_bit[24];
	u8	ecc_sum = 0;
	u8	find_bit = 0;
	uint	find_byte = 0;
	int	isEccFF;

	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);

	gen_true_ecc(ecc_data1);
	gen_true_ecc(ecc_data2);

	for (i = 0; i <= 2; i++) {
		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
	}

	for (i = 0; i < 8; i++) {
		tmp0_bit[i]	= *ecc_data1 % 2;
		*ecc_data1	= *ecc_data1 / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp0_bit[i]	= *ecc_data2 % 2;
		*ecc_data2	= *ecc_data2 / 2;
	}

	for (i = 0; i < 8; i++) {
		comp1_bit[i]	 = *(ecc_data2 + 1) % 2;
		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
	}

	for (i = 0; i < 8; i++) {
		comp2_bit[i]	 = *(ecc_data2 + 2) % 2;
		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
	}

	for (i = 0; i < 6; i++)
		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];

	for (i = 0; i < 8; i++)
		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];

	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];

	for (i = 0; i < 24; i++)
		ecc_sum += ecc_bit[i];

	switch (ecc_sum) {
	case 0:
		/* Not reached because this function is not called if
		 * ECC values are equal
		 */
		return 0;

	case 1:
		/* Uncorrectable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
		return -1;

	case 11:
		/* UN-Correctable error */
		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
		return -1;

	case 12:
		/* Correctable error */
		find_byte = (ecc_bit[23] << 8) +
			    (ecc_bit[21] << 7) +
			    (ecc_bit[19] << 6) +
			    (ecc_bit[17] << 5) +
			    (ecc_bit[15] << 4) +
			    (ecc_bit[13] << 3) +
			    (ecc_bit[11] << 2) +
			    (ecc_bit[9]  << 1) +
			    ecc_bit[7];

		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];

		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
				"offset: %d, bit: %d\n", find_byte, find_bit);

		page_data[find_byte] ^= (1 << find_bit);

		return 0;
	default:
		if (isEccFF) {
			if (ecc_data2[0] == 0 &&
			    ecc_data2[1] == 0 &&
			    ecc_data2[2] == 0)
				return 0;
		}
		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
		return -1;
	}
}

/**
 * omap_correct_data - Compares the ECC read with HW generated ECC
 * @mtd: MTD device structure
 * @dat: page data
 * @read_ecc: ecc read from nand flash
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Compares the ecc read from nand spare area with ECC registers values
 * and if the ECCs mismatch, it will call 'omap_compare_ecc' for error
 * detection and correction.
 */
static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
				u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	int blockCnt = 0, i = 0, ret = 0;

	/* Ex NAND_ECC_HW12_2048 */
	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
			(info->nand.ecc.size == 2048))
		blockCnt = 4;
	else
		blockCnt = 1;

	for (i = 0; i < blockCnt; i++) {
		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
			if (ret < 0)
				return ret;
		}
		read_ecc += 3;
		calc_ecc += 3;
		dat += 512;
	}
	return 0;
}

/**
 * omap_calculate_ecc - Generate non-inverted ECC bytes.
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_code: The ecc_code buffer
 *
 * Using noninverted ECC can be considered ugly since writing a blank
 * page ie. padding will clear the ECC bytes. This is no problem as long
 * as nobody is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
				u_char *ecc_code)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long val = 0x0;
	unsigned long reg;

	/* Start Reading from HW ECC1_Result = 0x200 */
	reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
	val = __raw_readl(reg);
	*ecc_code++ = val;		/* P128e, ..., P1e */
	*ecc_code++ = val >> 16;	/* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
	reg += 4;

	return 0;
}

/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @mtd: MTD device structure
 * @mode: Read/Write mode
 */
static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	struct nand_chip *chip = mtd->priv;
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);

	switch (mode) {
	case NAND_ECC_READ:
		__raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	case NAND_ECC_READSYN:
		__raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	case NAND_ECC_WRITE:
		__raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
		/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
		val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
		break;
	default:
		DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
					mode);
		break;
	}

	__raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
}
#endif

/**
 * omap_wait - wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND Chip structure
 *
 * The wait function is called during program and erase operations; given
 * the way it is called from the MTD layer, we should wait until the NAND
 * chip is ready after the programming/erase operation has completed.
 *
 * Erase can take up to 400ms and program up to 20ms according to
 * general NAND and SmartMedia specs
 */
static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct nand_chip *this = mtd->priv;
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned long timeo = jiffies;
	int status = NAND_STATUS_FAIL, state = this->state;

	if (state == FL_ERASING)
		timeo += (HZ * 400) / 1000;
	else
		timeo += (HZ * 20) / 1000;

	this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
						GPMC_CS_NAND_COMMAND;
	this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;

	__raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);

	while (time_before(jiffies, timeo)) {
		status = __raw_readb(this->IO_ADDR_R);
		if (status & NAND_STATUS_READY)
			break;
		cond_resched();
	}
	return status;
}

/**
 * omap_dev_ready - calls the platform specific dev_ready function
 * @mtd: MTD device structure
 */
static int omap_dev_ready(struct mtd_info *mtd)
{
	struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
							mtd);
	unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);

	if ((val & 0x100) == 0x100) {
		/* Clear IRQ Interrupt */
		val |= 0x100;
		val &= ~(0x0);
		__raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
	} else {
		unsigned int cnt = 0;
		while (cnt++ < 0x1FF) {
			if ((val & 0x100) == 0x100)
				return 0;
			val = __raw_readl(info->gpmc_baseaddr +
							GPMC_IRQ_STATUS);
		}
	}

	return 1;
}

static int __devinit omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info		*info;
	struct omap_nand_platform_data	*pdata;
	int				err;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	spin_lock_init(&info->controller.lock);
	init_waitqueue_head(&info->controller.wq);

	info->pdev = pdev;

	info->gpmc_cs		= pdata->cs;
	info->gpmc_baseaddr	= pdata->gpmc_baseaddr;
	info->gpmc_cs_baseaddr	= pdata->gpmc_cs_baseaddr;
	info->phys_base		= pdata->phys_base;

	info->mtd.priv		= &info->nand;
	info->mtd.name		= dev_name(&pdev->dev);
	info->mtd.owner		= THIS_MODULE;

	info->nand.options	|= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
	info->nand.options	|= NAND_SKIP_BBTSCAN;

	/* NAND write protect off */
	omap_nand_wp(&info->mtd, NAND_WP_OFF);

	if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
				pdev->dev.driver->name)) {
		err = -EBUSY;
		goto out_free_info;
	}

	info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
	if (!info->nand.IO_ADDR_R) {
		err = -ENOMEM;
		goto out_release_mem_region;
	}

	info->nand.controller = &info->controller;

	info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
	info->nand.cmd_ctrl  = omap_hwcontrol;

	/*
	 * If RDY/BSY line is connected to OMAP then use the omap ready
	 * function and the generic nand_wait function which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read status register until you get a failure or success
	 */
	if (pdata->dev_ready) {
		info->nand.dev_ready = omap_dev_ready;
		info->nand.chip_delay = 0;
	} else {
		info->nand.waitfunc = omap_wait;
		info->nand.chip_delay = 50;
	}

	if (use_prefetch) {
		/* copy the virtual address of nand base for fifo access */
		info->nand_pref_fifo_add = info->nand.IO_ADDR_R;

		info->nand.read_buf   = omap_read_buf_pref;
		info->nand.write_buf  = omap_write_buf_pref;
		if (use_dma) {
			err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
			if (err < 0) {
				info->dma_ch = -1;
				printk(KERN_WARNING "DMA request failed."
					" Non-dma data transfer mode\n");
			} else {
				omap_set_dma_dest_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);
				omap_set_dma_src_burst_mode(info->dma_ch,
						OMAP_DMA_DATA_BURST_16);

				info->nand.read_buf   = omap_read_buf_dma_pref;
				info->nand.write_buf  = omap_write_buf_dma_pref;
			}
		}
	} else {
		if (info->nand.options & NAND_BUSWIDTH_16) {
			info->nand.read_buf   = omap_read_buf16;
			info->nand.write_buf  = omap_write_buf16;
		} else {
			info->nand.read_buf   = omap_read_buf8;
			info->nand.write_buf  = omap_write_buf8;
		}
	}
	info->nand.verify_buf = omap_verify_buf;

#ifdef CONFIG_MTD_NAND_OMAP_HWECC
	info->nand.ecc.bytes		= 3;
	info->nand.ecc.size		= 512;
	info->nand.ecc.calculate	= omap_calculate_ecc;
	info->nand.ecc.hwctl		= omap_enable_hwecc;
	info->nand.ecc.correct		= omap_correct_data;
	info->nand.ecc.mode		= NAND_ECC_HW;

	/* init HW ECC */
	omap_hwecc_init(&info->mtd);
#else
	info->nand.ecc.mode = NAND_ECC_SOFT;
#endif

	/* DIP switches on some boards change between 8 and 16 bit
	 * bus widths for flash. Try the other width if the first try fails.
	 */
	if (nand_scan(&info->mtd, 1)) {
		info->nand.options ^= NAND_BUSWIDTH_16;
		if (nand_scan(&info->mtd, 1)) {
			err = -ENXIO;
			goto out_release_mem_region;
		}
	}

#ifdef CONFIG_MTD_PARTITIONS
	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
	if (err > 0)
		add_mtd_partitions(&info->mtd, info->parts, err);
	else if (pdata->parts)
		add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
	else
#endif
		add_mtd_device(&info->mtd);

	platform_set_drvdata(pdev, &info->mtd);

	return 0;

out_release_mem_region:
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);

	return err;
}

static int omap_nand_remove(struct platform_device *pdev)
{
	struct mtd_info *mtd = platform_get_drvdata(pdev);
	struct omap_nand_info *info = mtd->priv;

	platform_set_drvdata(pdev, NULL);
	if (use_dma)
		omap_free_dma(info->dma_ch);

	/* Release NAND device, its internal structures and partitions */
	nand_release(&info->mtd);
	iounmap(info->nand_pref_fifo_add);
	kfree(&info->mtd);
	return 0;
}

static struct platform_driver omap_nand_driver = {
	.probe		= omap_nand_probe,
	.remove		= omap_nand_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init omap_nand_init(void)
{
	printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);

	/* This check is required if driver is being
	 * loaded run time as a module
	 */
	if ((1 == use_dma) && (0 == use_prefetch)) {
		printk(KERN_INFO "Wrong parameters: 'use_dma' cannot be 1 "
				"without 'use_prefetch'. Prefetch will not be"
				" used in either mode (mpu or dma)\n");
	}
	return platform_driver_register(&omap_nand_driver);
}

static void __exit omap_nand_exit(void)
{
	platform_driver_unregister(&omap_nand_driver);
}

module_init(omap_nand_init);
module_exit(omap_nand_exit);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");