2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
8 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
9 * - completely revamped method functions so they are aware and
10 * independent of the flash geometry (buswidth, interleave, etc.)
11 * - scalability vs code size is completely set at compile-time
12 * (see include/linux/mtd/cfi.h for selection)
13 * - optimized write buffer method
14 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15 * - reworked lock/unlock/erase support for var size flash
16 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
17 * - auto-unlock sectors on resume for flash that auto-locks on power-up
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
25 #include <asm/byteorder.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/reboot.h>
32 #include <linux/bitmap.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
45 #define I82802AB 0x00ad
46 #define I82802AC 0x00ac
47 #define PF38F4476 0x881c
48 #define M28F00AP30 0x8963
49 /* STMicroelectronics chips */
50 #define M50LPW080 0x002F
51 #define M50FLW080A 0x0080
52 #define M50FLW080B 0x0081
54 #define AT49BV640D 0x02de
55 #define AT49BV640DT 0x02db
57 #define LH28F640BFHE_PTTL90 0x00b0
58 #define LH28F640BFHE_PBTL90 0x00b1
59 #define LH28F640BFHE_PTTL70A 0x00b2
60 #define LH28F640BFHE_PBTL70A 0x00b3
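/*
 * Quick reference for the Intel/Sharp (cmdset 0001) command opcodes used
 * throughout this file (see the Intel StrataFlash datasheets):
 *   0xff  Read Array            0x70  Read Status Register
 *   0x50  Clear Status Register 0x90  Read Identifier/Query
 *   0x40/0x41  Word Program     0xc0  OTP/Protection Program
 *   0xe8/0xe9  Write to Buffer (0x41/0xe9 on Intel Performance parts)
 *   0x20  Block Erase setup     0xd0  Confirm / Resume
 *   0xb0  Program/Erase Suspend
 *   0x60  Lock setup            0x01  Set Block Lock Bit
 */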
62 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
66 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
67 static void cfi_intelext_sync (struct mtd_info *);
68 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
69 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
73 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
74 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
77 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
78 size_t *, struct otp_info *);
79 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
80 size_t *, struct otp_info *);
82 static int cfi_intelext_suspend (struct mtd_info *);
83 static void cfi_intelext_resume (struct mtd_info *);
84 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
86 static void cfi_intelext_destroy(struct mtd_info *);
88 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
90 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
91 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
93 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, void **virt, resource_size_t *phys);
95 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
97 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
98 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
100 #include "fwh_lock.h"
105 * *********** SETUP AND PROBE BITS ***********
108 static struct mtd_chip_driver cfi_intelext_chipdrv = {
109 .probe = NULL, /* Not usable directly */
110 .destroy = cfi_intelext_destroy,
111 .name = "cfi_cmdset_0001",
112 .module = THIS_MODULE
115 /* #define DEBUG_LOCK_BITS */
116 /* #define DEBUG_CFI_FEATURES */
118 #ifdef DEBUG_CFI_FEATURES
119 static void cfi_tell_features(struct cfi_pri_intelext *extp)
122 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
123 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
124 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
125 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
126 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
127 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
128 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
129 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
130 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
131 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
132 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
133 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
134 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
135 for (i=11; i<32; i++) {
136 if (extp->FeatureSupport & (1<<i))
137 printk(" - Unknown Bit %X: supported\n", i);
140 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
141 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
142 for (i=1; i<8; i++) {
143 if (extp->SuspendCmdSupport & (1<<i))
144 printk(" - Unknown Bit %X: supported\n", i);
147 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
148 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
149 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
150 for (i=2; i<3; i++) {
151 if (extp->BlkStatusRegMask & (1<<i))
152 printk(" - Unknown Bit %X Active: yes\n",i);
154 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
155 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
156 for (i=6; i<16; i++) {
157 if (extp->BlkStatusRegMask & (1<<i))
158 printk(" - Unknown Bit %X Active: yes\n",i);
161 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
162 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
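/* Per CFI, the optimum Vcc/Vpp voltages are BCD-encoded:
 * high nibble = volts, low nibble = 100 mV units. */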
163 if (extp->VppOptimal)
164 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
165 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
169 /* Atmel chips don't use the same PRI format as Intel chips */
170 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
172 struct map_info *map = mtd->priv;
173 struct cfi_private *cfi = map->fldrv_priv;
174 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
175 struct cfi_pri_atmel atmel_pri;
176 uint32_t features = 0;
178 /* Reverse byteswapping */
179 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
180 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
181 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
183 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
184 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
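/* The 5-byte header ("PRI" signature plus major/minor version) is kept;
 * everything after it is cleared and rebuilt in Intel format below. */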
186 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
188 if (atmel_pri.Features & 0x01) /* chip erase supported */
190 if (atmel_pri.Features & 0x02) /* erase suspend supported */
192 if (atmel_pri.Features & 0x04) /* program suspend supported */
194 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
196 if (atmel_pri.Features & 0x20) /* page mode read supported */
198 if (atmel_pri.Features & 0x40) /* queued erase supported */
200 if (atmel_pri.Features & 0x80) /* Protection bits supported */
203 extp->FeatureSupport = features;
205 /* burst write mode not supported */
206 cfi->cfiq->BufWriteTimeoutTyp = 0;
207 cfi->cfiq->BufWriteTimeoutMax = 0;
210 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
212 struct map_info *map = mtd->priv;
213 struct cfi_private *cfi = map->fldrv_priv;
214 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
216 cfip->FeatureSupport |= (1 << 5);
217 mtd->flags |= MTD_POWERUP_LOCK;
220 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
221 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
222 static void fixup_intel_strataflash(struct mtd_info *mtd)
224 struct map_info *map = mtd->priv;
225 struct cfi_private *cfi = map->fldrv_priv;
226 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
228 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
229 "erase on write disabled.\n");
230 extp->SuspendCmdSupport &= ~1;
234 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
235 static void fixup_no_write_suspend(struct mtd_info *mtd)
237 struct map_info *map = mtd->priv;
238 struct cfi_private *cfi = map->fldrv_priv;
239 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
241 if (cfip && (cfip->FeatureSupport&4)) {
242 cfip->FeatureSupport &= ~4;
243 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
248 static void fixup_st_m28w320ct(struct mtd_info *mtd)
250 struct map_info *map = mtd->priv;
251 struct cfi_private *cfi = map->fldrv_priv;
253 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
254 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
257 static void fixup_st_m28w320cb(struct mtd_info *mtd)
259 struct map_info *map = mtd->priv;
260 struct cfi_private *cfi = map->fldrv_priv;
262 /* Note this is done after the region info is endian swapped */
263 cfi->cfiq->EraseRegionInfo[1] =
264 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
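/* The low 16 bits of EraseRegionInfo hold (number of blocks - 1),
 * so this forces the second region to report 0x3e + 1 = 63 blocks. */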
267 static int is_LH28F640BF(struct cfi_private *cfi)
269 /* Sharp LH28F640BF Family */
270 if (cfi->mfr == CFI_MFR_SHARP && (
271 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
272 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
277 static void fixup_LH28F640BF(struct mtd_info *mtd)
279 struct map_info *map = mtd->priv;
280 struct cfi_private *cfi = map->fldrv_priv;
281 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
283 /* Reset the Partition Configuration Register on LH28F640BF
284 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
285 if (is_LH28F640BF(cfi)) {
286 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
287 map_write(map, CMD(0x60), 0);
288 map_write(map, CMD(0x04), 0);
290 /* We have set a single partition, thus
291 * Simultaneous Operations are not allowed */
292 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
293 extp->FeatureSupport &= ~512;
297 static void fixup_use_point(struct mtd_info *mtd)
299 struct map_info *map = mtd->priv;
300 if (!mtd->_point && map_is_linear(map)) {
301 mtd->_point = cfi_intelext_point;
302 mtd->_unpoint = cfi_intelext_unpoint;
306 static void fixup_use_write_buffers(struct mtd_info *mtd)
308 struct map_info *map = mtd->priv;
309 struct cfi_private *cfi = map->fldrv_priv;
310 if (cfi->cfiq->BufWriteTimeoutTyp) {
311 printk(KERN_INFO "Using buffer write method\n" );
312 mtd->_write = cfi_intelext_write_buffers;
313 mtd->_writev = cfi_intelext_writev;
318 * Some chips power-up with all sectors locked by default.
320 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
322 struct map_info *map = mtd->priv;
323 struct cfi_private *cfi = map->fldrv_priv;
324 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
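/* FeatureSupport bit 5 (value 32) advertises instant individual block
 * locking; such parts power up with all blocks locked, so flag the device
 * with MTD_POWERUP_LOCK so that sectors are auto-unlocked on boot/resume
 * (see the 2007 changelog entry above). */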
326 if (cfip->FeatureSupport&32) {
327 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
328 mtd->flags |= MTD_POWERUP_LOCK;
332 static struct cfi_fixup cfi_fixup_table[] = {
333 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
334 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
335 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
336 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
337 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
339 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
340 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
342 #if !FORCE_WORD_WRITE
343 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
345 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
346 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
347 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
348 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
349 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
353 static struct cfi_fixup jedec_fixup_table[] = {
354 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
355 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
356 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
357 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
358 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
361 static struct cfi_fixup fixup_table[] = {
362 /* The CFI vendor ids and the JEDEC vendor IDs appear
363 * to be common. It looks like the device IDs are as
364 * well. This table picks up all the cases where
365 * we know that is the case.
367 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
371 static void cfi_fixup_major_minor(struct cfi_private *cfi,
372 struct cfi_pri_intelext *extp)
374 if (cfi->mfr == CFI_MFR_INTEL &&
375 cfi->id == PF38F4476 && extp->MinorVersion == '3')
376 extp->MinorVersion = '1';
379 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
382 * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
383 * Erase Suspend for their small Erase Blocks (0x8000)
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
390 static inline struct cfi_pri_intelext *
391 read_pri_intelext(struct map_info *map, __u16 adr)
393 struct cfi_private *cfi = map->fldrv_priv;
394 struct cfi_pri_intelext *extp;
395 unsigned int extra_size = 0;
396 unsigned int extp_size = sizeof(*extp);
399 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
403 cfi_fixup_major_minor(cfi, extp);
405 if (extp->MajorVersion != '1' ||
406 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
407 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
408 "version %c.%c.\n", extp->MajorVersion,
414 /* Do some byteswapping if necessary */
415 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
416 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
417 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
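/* The extended query table grows with the minor version: 1.0 adds the
 * protection (OTP) register info, 1.1 the burst read info, 1.3 the
 * per-partition region info and 1.4 the programming region info.
 * extra_size walks these variable-length fields so the whole table can
 * be re-read if the initial read was too short. */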
419 if (extp->MinorVersion >= '0') {
422 /* Protection Register info */
423 extra_size += (extp->NumProtectionFields - 1) *
424 sizeof(struct cfi_intelext_otpinfo);
427 if (extp->MinorVersion >= '1') {
428 /* Burst Read info */
430 if (extp_size < sizeof(*extp) + extra_size)
432 extra_size += extp->extra[extra_size - 1];
435 if (extp->MinorVersion >= '3') {
438 /* Number of hardware-partitions */
440 if (extp_size < sizeof(*extp) + extra_size)
442 nb_parts = extp->extra[extra_size - 1];
444 /* skip the sizeof(partregion) field in CFI 1.4 */
445 if (extp->MinorVersion >= '4')
448 for (i = 0; i < nb_parts; i++) {
449 struct cfi_intelext_regioninfo *rinfo;
450 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
451 extra_size += sizeof(*rinfo);
452 if (extp_size < sizeof(*extp) + extra_size)
454 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
455 extra_size += (rinfo->NumBlockTypes - 1)
456 * sizeof(struct cfi_intelext_blockinfo);
459 if (extp->MinorVersion >= '4')
460 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
462 if (extp_size < sizeof(*extp) + extra_size) {
464 extp_size = sizeof(*extp) + extra_size;
466 if (extp_size > 4096) {
468 "%s: cfi_pri_intelext is too fat\n",
479 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
481 struct cfi_private *cfi = map->fldrv_priv;
482 struct mtd_info *mtd;
485 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
489 mtd->type = MTD_NORFLASH;
491 /* Fill in the default mtd operations */
492 mtd->_erase = cfi_intelext_erase_varsize;
493 mtd->_read = cfi_intelext_read;
494 mtd->_write = cfi_intelext_write_words;
495 mtd->_sync = cfi_intelext_sync;
496 mtd->_lock = cfi_intelext_lock;
497 mtd->_unlock = cfi_intelext_unlock;
498 mtd->_is_locked = cfi_intelext_is_locked;
499 mtd->_suspend = cfi_intelext_suspend;
500 mtd->_resume = cfi_intelext_resume;
501 mtd->flags = MTD_CAP_NORFLASH;
502 mtd->name = map->name;
504 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
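/* MaxBufWriteSize is the log2 of the per-chip write buffer size in bytes,
 * so the effective write buffer is interleave * 2^MaxBufWriteSize. */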
506 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
508 if (cfi->cfi_mode == CFI_MODE_CFI) {
510 * It's a real CFI chip, not one for which the probe
511 * routine faked a CFI structure. So we read the feature table from it.
514 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
515 struct cfi_pri_intelext *extp;
517 extp = read_pri_intelext(map, adr);
523 /* Install our own private info structure */
524 cfi->cmdset_priv = extp;
526 cfi_fixup(mtd, cfi_fixup_table);
528 #ifdef DEBUG_CFI_FEATURES
529 /* Tell the user about it in lots of lovely detail */
530 cfi_tell_features(extp);
533 if(extp->SuspendCmdSupport & 1) {
534 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
537 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
538 /* Apply jedec specific fixups */
539 cfi_fixup(mtd, jedec_fixup_table);
541 /* Apply generic fixups */
542 cfi_fixup(mtd, fixup_table);
544 for (i=0; i< cfi->numchips; i++) {
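/* The CFI timeout fields are log2-encoded: typical word/buffer write
 * times are 2^Typ microseconds, block erase 2^Typ milliseconds, and the
 * worst case is the typical value shifted left by the Max field. */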
545 if (cfi->cfiq->WordWriteTimeoutTyp)
546 cfi->chips[i].word_write_time =
547 1<<cfi->cfiq->WordWriteTimeoutTyp;
549 cfi->chips[i].word_write_time = 50000;
551 if (cfi->cfiq->BufWriteTimeoutTyp)
552 cfi->chips[i].buffer_write_time =
553 1<<cfi->cfiq->BufWriteTimeoutTyp;
554 /* No default; if it isn't specified, we won't use it */
556 if (cfi->cfiq->BlockEraseTimeoutTyp)
557 cfi->chips[i].erase_time =
558 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
560 cfi->chips[i].erase_time = 2000000;
562 if (cfi->cfiq->WordWriteTimeoutTyp &&
563 cfi->cfiq->WordWriteTimeoutMax)
564 cfi->chips[i].word_write_time_max =
565 1<<(cfi->cfiq->WordWriteTimeoutTyp +
566 cfi->cfiq->WordWriteTimeoutMax);
568 cfi->chips[i].word_write_time_max = 50000 * 8;
570 if (cfi->cfiq->BufWriteTimeoutTyp &&
571 cfi->cfiq->BufWriteTimeoutMax)
572 cfi->chips[i].buffer_write_time_max =
573 1<<(cfi->cfiq->BufWriteTimeoutTyp +
574 cfi->cfiq->BufWriteTimeoutMax);
576 if (cfi->cfiq->BlockEraseTimeoutTyp &&
577 cfi->cfiq->BlockEraseTimeoutMax)
578 cfi->chips[i].erase_time_max =
579 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
580 cfi->cfiq->BlockEraseTimeoutMax);
582 cfi->chips[i].erase_time_max = 2000000 * 8;
584 cfi->chips[i].ref_point_counter = 0;
585 init_waitqueue_head(&(cfi->chips[i].wq));
588 map->fldrv = &cfi_intelext_chipdrv;
590 return cfi_intelext_setup(mtd);
592 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
593 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
594 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
595 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
596 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
598 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
600 struct map_info *map = mtd->priv;
601 struct cfi_private *cfi = map->fldrv_priv;
602 unsigned long offset = 0;
604 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
606 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
608 mtd->size = devsize * cfi->numchips;
610 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
611 mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
612 * mtd->numeraseregions, GFP_KERNEL);
613 if (!mtd->eraseregions)
616 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
617 unsigned long ernum, ersize;
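/* EraseRegionInfo encoding per CFI: bits 0-15 = (number of blocks - 1),
 * bits 16-31 = block size in units of 256 bytes. */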
618 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
619 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
621 if (mtd->erasesize < ersize) {
622 mtd->erasesize = ersize;
624 for (j=0; j<cfi->numchips; j++) {
625 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
626 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
627 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
628 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
629 if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
632 offset += (ersize * ernum);
635 if (offset != devsize) {
637 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
641 for (i=0; i<mtd->numeraseregions;i++){
642 printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
643 i,(unsigned long long)mtd->eraseregions[i].offset,
644 mtd->eraseregions[i].erasesize,
645 mtd->eraseregions[i].numblocks);
648 #ifdef CONFIG_MTD_OTP
649 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
650 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
651 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
652 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
653 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
654 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
657 /* This function has the potential to distort the reality
658 a bit and therefore should be called last. */
659 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
662 __module_get(THIS_MODULE);
663 register_reboot_notifier(&mtd->reboot_notifier);
667 if (mtd->eraseregions)
668 for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
669 for (j=0; j<cfi->numchips; j++)
670 kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
671 kfree(mtd->eraseregions);
673 kfree(cfi->cmdset_priv);
677 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
678 struct cfi_private **pcfi)
680 struct map_info *map = mtd->priv;
681 struct cfi_private *cfi = *pcfi;
682 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
685 * Probing of multi-partition flash chips.
687 * To support multiple partitions when available, we simply arrange
688 * for each of them to have their own flchip structure even if they
689 * are on the same physical chip. This means completely recreating
690 * a new cfi_private structure right here which is a blatant code
691 * layering violation, but this is still the least intrusive
692 * arrangement at this point. This can be rearranged in the future
693 * if someone feels motivated enough. --nico
695 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
696 && extp->FeatureSupport & (1 << 9)) {
697 struct cfi_private *newcfi;
699 struct flchip_shared *shared;
700 int offs, numregions, numparts, partshift, numvirtchips, i, j;
702 /* Protection Register info */
703 offs = (extp->NumProtectionFields - 1) *
704 sizeof(struct cfi_intelext_otpinfo);
706 /* Burst Read info */
707 offs += extp->extra[offs+1]+2;
709 /* Number of partition regions */
710 numregions = extp->extra[offs];
713 /* skip the sizeof(partregion) field in CFI 1.4 */
714 if (extp->MinorVersion >= '4')
717 /* Number of hardware partitions */
719 for (i = 0; i < numregions; i++) {
720 struct cfi_intelext_regioninfo *rinfo;
721 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
722 numparts += rinfo->NumIdentPartitions;
723 offs += sizeof(*rinfo)
724 + (rinfo->NumBlockTypes - 1) *
725 sizeof(struct cfi_intelext_blockinfo);
731 /* Programming Region info */
732 if (extp->MinorVersion >= '4') {
733 struct cfi_intelext_programming_regioninfo *prinfo;
734 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
735 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
736 mtd->flags &= ~MTD_BIT_WRITEABLE;
737 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
738 map->name, mtd->writesize,
739 cfi->interleave * prinfo->ControlValid,
740 cfi->interleave * prinfo->ControlInvalid);
744 * All functions below currently rely on all chips having
745 * the same geometry so we'll just assume that all hardware
746 * partitions are of the same size too.
748 partshift = cfi->chipshift - __ffs(numparts);
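/* partshift is the log2 of each partition's size (numparts is expected to
 * be a power of two here). E.g. a 64 MiB chip (chipshift = 26) with 4
 * hardware partitions gives partshift = 24, i.e. 16 MiB partitions. */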
750 if ((1 << partshift) < mtd->erasesize) {
752 "%s: bad number of hw partitions (%d)\n",
757 numvirtchips = cfi->numchips * numparts;
758 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
761 shared = kmalloc_array(cfi->numchips,
762 sizeof(struct flchip_shared),
768 memcpy(newcfi, cfi, sizeof(struct cfi_private));
769 newcfi->numchips = numvirtchips;
770 newcfi->chipshift = partshift;
772 chip = &newcfi->chips[0];
773 for (i = 0; i < cfi->numchips; i++) {
774 shared[i].writing = shared[i].erasing = NULL;
775 mutex_init(&shared[i].lock);
776 for (j = 0; j < numparts; j++) {
777 *chip = cfi->chips[i];
778 chip->start += j << partshift;
779 chip->priv = &shared[i];
780 /* those should be reset too since
781 they create memory references. */
782 init_waitqueue_head(&chip->wq);
783 mutex_init(&chip->mutex);
788 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
789 "--> %d partitions of %d KiB\n",
790 map->name, cfi->numchips, cfi->interleave,
791 newcfi->numchips, 1<<(newcfi->chipshift-10));
793 map->fldrv_priv = newcfi;
802 * *********** CHIP ACCESS FUNCTIONS ***********
804 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
806 DECLARE_WAITQUEUE(wait, current);
807 struct cfi_private *cfi = map->fldrv_priv;
808 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
809 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
810 unsigned long timeo = jiffies + HZ;
812 /* Prevent setting state FL_SYNCING for chip in suspended state. */
813 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
816 switch (chip->state) {
820 status = map_read(map, adr);
821 if (map_word_andequal(map, status, status_OK, status_OK))
824 /* At this point we're fine with write operations
825 in other partitions as they don't conflict. */
826 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
829 mutex_unlock(&chip->mutex);
831 mutex_lock(&chip->mutex);
832 /* Someone else might have been playing with it. */
843 !(cfip->FeatureSupport & 2) ||
844 !(mode == FL_READY || mode == FL_POINT ||
845 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
848 /* Do not allow suspend if the read/write targets the block being erased */
849 if ((adr & chip->in_progress_block_mask) ==
850 chip->in_progress_block_addr)
853 /* do not suspend small EBs, buggy Micron Chips */
854 if (cfi_is_micron_28F00AP30(cfi, chip) &&
855 (chip->in_progress_block_mask == ~(0x8000-1)))
859 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
861 /* If the flash has finished erasing, then 'erase suspend'
862 * appears to make some (28F320) flash devices switch to
863 * 'read' mode. Make sure that we switch to 'read status'
864 * mode so we get the right data. --rmk
866 map_write(map, CMD(0x70), chip->in_progress_block_addr);
867 chip->oldstate = FL_ERASING;
868 chip->state = FL_ERASE_SUSPENDING;
869 chip->erase_suspended = 1;
871 status = map_read(map, chip->in_progress_block_addr);
872 if (map_word_andequal(map, status, status_OK, status_OK))
875 if (time_after(jiffies, timeo)) {
876 /* Urgh. Resume and pretend we weren't here.
877 * Make sure we're in 'read status' mode if it had finished */
878 put_chip(map, chip, adr);
879 printk(KERN_ERR "%s: Chip not ready after erase "
880 "suspended: status = 0x%lx\n", map->name, status.x[0]);
884 mutex_unlock(&chip->mutex);
886 mutex_lock(&chip->mutex);
887 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
888 So we can just loop here. */
890 chip->state = FL_STATUS;
893 case FL_XIP_WHILE_ERASING:
894 if (mode != FL_READY && mode != FL_POINT &&
895 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
897 chip->oldstate = chip->state;
898 chip->state = FL_READY;
902 /* The machine is rebooting now, so no one can access the chip anymore */
905 /* Only if there's no operation suspended... */
906 if (mode == FL_READY && chip->oldstate == FL_READY)
911 set_current_state(TASK_UNINTERRUPTIBLE);
912 add_wait_queue(&chip->wq, &wait);
913 mutex_unlock(&chip->mutex);
915 remove_wait_queue(&chip->wq, &wait);
916 mutex_lock(&chip->mutex);
921 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
924 DECLARE_WAITQUEUE(wait, current);
928 (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
929 || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
931 * OK. There is a possibility of contention on the write/erase
932 * operations which are global to the real chip and not per
933 * partition. So let's fight it over in the partition which
934 * currently has authority on the operation.
936 * The rules are as follows:
938 * - any write operation must own shared->writing.
940 * - any erase operation must own _both_ shared->writing and
943 * - contention arbitration is handled in the owner's context.
945 * The 'shared' struct can be read and/or written only when
948 struct flchip_shared *shared = chip->priv;
949 struct flchip *contender;
950 mutex_lock(&shared->lock);
951 contender = shared->writing;
952 if (contender && contender != chip) {
954 * The engine to perform desired operation on this
955 * partition is already in use by someone else.
956 * Let's fight over it in the context of the chip
957 * currently using it. If it is possible to suspend,
958 * that other partition will do just that, otherwise
959 * it'll happily send us to sleep. In any case, when
960 * get_chip returns success we're clear to go ahead.
962 ret = mutex_trylock(&contender->mutex);
963 mutex_unlock(&shared->lock);
966 mutex_unlock(&chip->mutex);
967 ret = chip_ready(map, contender, contender->start, mode);
968 mutex_lock(&chip->mutex);
970 if (ret == -EAGAIN) {
971 mutex_unlock(&contender->mutex);
975 mutex_unlock(&contender->mutex);
978 mutex_lock(&shared->lock);
980 /* We should not own the chip if it is already
981 * in FL_SYNCING state. Put the contender and retry. */
982 if (chip->state == FL_SYNCING) {
983 put_chip(map, contender, contender->start);
984 mutex_unlock(&contender->mutex);
987 mutex_unlock(&contender->mutex);
990 /* Check if we already have suspended erase
991 * on this chip. Sleep. */
992 if (mode == FL_ERASING && shared->erasing
993 && shared->erasing->oldstate == FL_ERASING) {
994 mutex_unlock(&shared->lock);
995 set_current_state(TASK_UNINTERRUPTIBLE);
996 add_wait_queue(&chip->wq, &wait);
997 mutex_unlock(&chip->mutex);
999 remove_wait_queue(&chip->wq, &wait);
1000 mutex_lock(&chip->mutex);
1005 shared->writing = chip;
1006 if (mode == FL_ERASING)
1007 shared->erasing = chip;
1008 mutex_unlock(&shared->lock);
1010 ret = chip_ready(map, chip, adr, mode);
1017 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1019 struct cfi_private *cfi = map->fldrv_priv;
1022 struct flchip_shared *shared = chip->priv;
1023 mutex_lock(&shared->lock);
1024 if (shared->writing == chip && chip->oldstate == FL_READY) {
1025 /* We own the ability to write, but we're done */
1026 shared->writing = shared->erasing;
1027 if (shared->writing && shared->writing != chip) {
1028 /* give back ownership to who we loaned it from */
1029 struct flchip *loaner = shared->writing;
1030 mutex_lock(&loaner->mutex);
1031 mutex_unlock(&shared->lock);
1032 mutex_unlock(&chip->mutex);
1033 put_chip(map, loaner, loaner->start);
1034 mutex_lock(&chip->mutex);
1035 mutex_unlock(&loaner->mutex);
1039 shared->erasing = NULL;
1040 shared->writing = NULL;
1041 } else if (shared->erasing == chip && shared->writing != chip) {
1043 * We own the ability to erase without the ability
1044 * to write, which means the erase was suspended
1045 * and some other partition is currently writing.
1046 * Don't let the switch below mess things up since
1047 * we don't have ownership to resume anything.
1049 mutex_unlock(&shared->lock);
1053 mutex_unlock(&shared->lock);
1056 switch(chip->oldstate) {
1058 /* What if one interleaved chip has finished and the
1059 other hasn't? The old code would leave the finished
1060 one in READY mode. That's bad, and caused -EROFS
1061 errors to be returned from do_erase_oneblock because
1062 that's the only bit it checked for at the time.
1063 As the state machine appears to explicitly allow
1064 sending the 0x70 (Read Status) command to an erasing
1065 chip and expecting it to be ignored, that's what we do. */
1067 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1068 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1069 chip->oldstate = FL_READY;
1070 chip->state = FL_ERASING;
1073 case FL_XIP_WHILE_ERASING:
1074 chip->state = chip->oldstate;
1075 chip->oldstate = FL_READY;
1080 case FL_JEDEC_QUERY:
1083 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1088 #ifdef CONFIG_MTD_XIP
1091 * No interrupt whatsoever can be serviced while the flash isn't in array
1092 * mode. This is ensured by the xip_disable() and xip_enable() functions
1093 * enclosing any code path where the flash is known not to be in array mode.
1094 * And within a XIP disabled code path, only functions marked with __xipram
1095 * may be called and nothing else (it's a good thing to inspect generated
1096 * assembly to make sure inline functions were actually inlined and that gcc
1097 * didn't emit calls to its own support functions). Configuring MTD CFI
1098 * support for a single buswidth and a single interleave is also recommended.
1101 static void xip_disable(struct map_info *map, struct flchip *chip,
1104 /* TODO: chips with no XIP use should ignore and return */
1105 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
1106 local_irq_disable();
1109 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1112 struct cfi_private *cfi = map->fldrv_priv;
1113 if (chip->state != FL_POINT && chip->state != FL_READY) {
1114 map_write(map, CMD(0xff), adr);
1115 chip->state = FL_READY;
1117 (void) map_read(map, adr);
1123 * When a delay is required for the flash operation to complete, the
1124 * xip_wait_for_operation() function is polling for both the given timeout
1125 * and pending (but still masked) hardware interrupts. Whenever there is an
1126 * interrupt pending then the flash erase or write operation is suspended,
1127 * array mode restored and interrupts unmasked. Task scheduling might also
1128 * happen at that point. The CPU eventually returns from the interrupt or
1129 * the call to schedule() and the suspended flash operation is resumed for
1130 * the remainder of the delay period.
1132 * Warning: this function _will_ fool interrupt latency tracing tools.
1135 static int __xipram xip_wait_for_operation(
1136 struct map_info *map, struct flchip *chip,
1137 unsigned long adr, unsigned int chip_op_time_max)
1139 struct cfi_private *cfi = map->fldrv_priv;
1140 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1141 map_word status, OK = CMD(0x80);
1142 unsigned long usec, suspended, start, done;
1143 flstate_t oldstate, newstate;
1145 start = xip_currtime();
1146 usec = chip_op_time_max;
1153 if (xip_irqpending() && cfip &&
1154 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1155 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1156 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1158 * Let's suspend the erase or write operation when
1159 * supported. Note that we currently don't try to
1160 * suspend interleaved chips if there is already
1161 * another operation suspended (imagine what happens
1162 * when one chip was already done with the current
1163 * operation while another chip suspended it, then
1164 * we resume the whole thing at once). Yes, it can happen!
1168 map_write(map, CMD(0xb0), adr);
1169 map_write(map, CMD(0x70), adr);
1170 suspended = xip_currtime();
1172 if (xip_elapsed_since(suspended) > 100000) {
1174 * The chip doesn't want to suspend
1175 * after waiting for 100 msecs.
1176 * This is a critical error but there
1177 * is not much we can do here.
1181 status = map_read(map, adr);
1182 } while (!map_word_andequal(map, status, OK, OK));
1184 /* Suspend succeeded */
1185 oldstate = chip->state;
1186 if (oldstate == FL_ERASING) {
1187 if (!map_word_bitsset(map, status, CMD(0x40)))
1189 newstate = FL_XIP_WHILE_ERASING;
1190 chip->erase_suspended = 1;
1192 if (!map_word_bitsset(map, status, CMD(0x04)))
1194 newstate = FL_XIP_WHILE_WRITING;
1195 chip->write_suspended = 1;
1197 chip->state = newstate;
1198 map_write(map, CMD(0xff), adr);
1199 (void) map_read(map, adr);
1202 mutex_unlock(&chip->mutex);
1207 * We're back. However someone else might have
1208 * decided to go write to the chip if we are in
1209 * a suspended erase state. If so let's wait until it's done.
1212 mutex_lock(&chip->mutex);
1213 while (chip->state != newstate) {
1214 DECLARE_WAITQUEUE(wait, current);
1215 set_current_state(TASK_UNINTERRUPTIBLE);
1216 add_wait_queue(&chip->wq, &wait);
1217 mutex_unlock(&chip->mutex);
1219 remove_wait_queue(&chip->wq, &wait);
1220 mutex_lock(&chip->mutex);
1222 /* Disallow XIP again */
1223 local_irq_disable();
1225 /* Resume the write or erase operation */
1226 map_write(map, CMD(0xd0), adr);
1227 map_write(map, CMD(0x70), adr);
1228 chip->state = oldstate;
1229 start = xip_currtime();
1230 } else if (usec >= 1000000/HZ) {
1232 * Try to save on CPU power when the waiting delay
1233 * is at least a system timer tick period.
1234 * No need to be extremely accurate here.
1238 status = map_read(map, adr);
1239 done = xip_elapsed_since(start);
1240 } while (!map_word_andequal(map, status, OK, OK)
1243 return (done >= usec) ? -ETIME : 0;
1247 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1248 * the flash is actively programming or erasing since we have to poll for
1249 * the operation to complete anyway. We can't do that in a generic way with
1250 * a XIP setup so do it before the actual flash operation in this case
1251 * and stub it out from INVAL_CACHE_AND_WAIT.
1253 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1254 INVALIDATE_CACHED_RANGE(map, from, size)
1256 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1257 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1261 #define xip_disable(map, chip, adr)
1262 #define xip_enable(map, chip, adr)
1263 #define XIP_INVAL_CACHED_RANGE(x...)
1264 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1266 static int inval_cache_and_wait_for_operation(
1267 struct map_info *map, struct flchip *chip,
1268 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1269 unsigned int chip_op_time, unsigned int chip_op_time_max)
1271 struct cfi_private *cfi = map->fldrv_priv;
1272 map_word status, status_OK = CMD(0x80);
1273 int chip_state = chip->state;
1274 unsigned int timeo, sleep_time, reset_timeo;
1276 mutex_unlock(&chip->mutex);
1278 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1279 mutex_lock(&chip->mutex);
1281 timeo = chip_op_time_max;
1284 reset_timeo = timeo;
1285 sleep_time = chip_op_time / 2;
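/* Poll the status register, sleeping roughly half the typical operation
 * time between checks, and give up once the worst-case timeout expires;
 * the timeout is restarted whenever the operation was suspended and
 * resumed in the meantime. */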
1288 if (chip->state != chip_state) {
1289 /* Someone's suspended the operation: sleep */
1290 DECLARE_WAITQUEUE(wait, current);
1291 set_current_state(TASK_UNINTERRUPTIBLE);
1292 add_wait_queue(&chip->wq, &wait);
1293 mutex_unlock(&chip->mutex);
1295 remove_wait_queue(&chip->wq, &wait);
1296 mutex_lock(&chip->mutex);
1300 status = map_read(map, cmd_adr);
1301 if (map_word_andequal(map, status, status_OK, status_OK))
1304 if (chip->erase_suspended && chip_state == FL_ERASING) {
1305 /* Erase suspend occurred while sleeping: reset timeout */
1306 timeo = reset_timeo;
1307 chip->erase_suspended = 0;
1309 if (chip->write_suspended && chip_state == FL_WRITING) {
1310 /* Write suspend occurred while sleeping: reset timeout */
1311 timeo = reset_timeo;
1312 chip->write_suspended = 0;
1315 map_write(map, CMD(0x70), cmd_adr);
1316 chip->state = FL_STATUS;
1320 /* OK Still waiting. Drop the lock, wait a while and retry. */
1321 mutex_unlock(&chip->mutex);
1322 if (sleep_time >= 1000000/HZ) {
1324 * Half of the normal delay still remaining
1325 * can be performed with a sleeping delay instead of busy waiting.
1328 msleep(sleep_time/1000);
1329 timeo -= sleep_time;
1330 sleep_time = 1000000/HZ;
1336 mutex_lock(&chip->mutex);
1339 /* Done and happy. */
1340 chip->state = FL_STATUS;
1346 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1347 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1350 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1352 unsigned long cmd_addr;
1353 struct cfi_private *cfi = map->fldrv_priv;
1358 /* Ensure cmd read/writes are aligned. */
1359 cmd_addr = adr & ~(map_bankwidth(map)-1);
1361 mutex_lock(&chip->mutex);
1363 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1366 if (chip->state != FL_POINT && chip->state != FL_READY)
1367 map_write(map, CMD(0xff), cmd_addr);
1369 chip->state = FL_POINT;
1370 chip->ref_point_counter++;
1372 mutex_unlock(&chip->mutex);
1377 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1378 size_t *retlen, void **virt, resource_size_t *phys)
1380 struct map_info *map = mtd->priv;
1381 struct cfi_private *cfi = map->fldrv_priv;
1382 unsigned long ofs, last_end = 0;
1389 /* Now lock the chip(s) to POINT state */
1391 /* ofs: offset within the first chip that the first read should start */
1392 chipnum = (from >> cfi->chipshift);
1393 ofs = from - (chipnum << cfi->chipshift);
1395 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1397 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1400 unsigned long thislen;
1402 if (chipnum >= cfi->numchips)
1405 /* We cannot point across chips that are virtually disjoint */
1407 last_end = cfi->chips[chipnum].start;
1408 else if (cfi->chips[chipnum].start != last_end)
1411 if ((len + ofs -1) >> cfi->chipshift)
1412 thislen = (1<<cfi->chipshift) - ofs;
1416 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1424 last_end += 1 << cfi->chipshift;
1430 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1432 struct map_info *map = mtd->priv;
1433 struct cfi_private *cfi = map->fldrv_priv;
1435 int chipnum, err = 0;
1437 /* Now unlock the chip(s) POINT state */
1439 /* ofs: offset within the first chip that the first read should start */
1440 chipnum = (from >> cfi->chipshift);
1441 ofs = from - (chipnum << cfi->chipshift);
1443 while (len && !err) {
1444 unsigned long thislen;
1445 struct flchip *chip;
1447 chip = &cfi->chips[chipnum];
1448 if (chipnum >= cfi->numchips)
1451 if ((len + ofs -1) >> cfi->chipshift)
1452 thislen = (1<<cfi->chipshift) - ofs;
1456 mutex_lock(&chip->mutex);
1457 if (chip->state == FL_POINT) {
1458 chip->ref_point_counter--;
1459 if(chip->ref_point_counter == 0)
1460 chip->state = FL_READY;
1462 printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1466 put_chip(map, chip, chip->start);
1467 mutex_unlock(&chip->mutex);
1477 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1479 unsigned long cmd_addr;
1480 struct cfi_private *cfi = map->fldrv_priv;
1485 /* Ensure cmd read/writes are aligned. */
1486 cmd_addr = adr & ~(map_bankwidth(map)-1);
1488 mutex_lock(&chip->mutex);
1489 ret = get_chip(map, chip, cmd_addr, FL_READY);
1491 mutex_unlock(&chip->mutex);
1495 if (chip->state != FL_POINT && chip->state != FL_READY) {
1496 map_write(map, CMD(0xff), cmd_addr);
1498 chip->state = FL_READY;
1501 map_copy_from(map, buf, adr, len);
1503 put_chip(map, chip, cmd_addr);
1505 mutex_unlock(&chip->mutex);
1509 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1511 struct map_info *map = mtd->priv;
1512 struct cfi_private *cfi = map->fldrv_priv;
1517 /* ofs: offset within the first chip that the first read should start */
1518 chipnum = (from >> cfi->chipshift);
1519 ofs = from - (chipnum << cfi->chipshift);
1522 unsigned long thislen;
1524 if (chipnum >= cfi->numchips)
1527 if ((len + ofs -1) >> cfi->chipshift)
1528 thislen = (1<<cfi->chipshift) - ofs;
1532 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1546 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1547 unsigned long adr, map_word datum, int mode)
1549 struct cfi_private *cfi = map->fldrv_priv;
1550 map_word status, write_cmd;
1557 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1560 write_cmd = CMD(0xc0);
1566 mutex_lock(&chip->mutex);
1567 ret = get_chip(map, chip, adr, mode);
1569 mutex_unlock(&chip->mutex);
1573 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1575 xip_disable(map, chip, adr);
1576 map_write(map, write_cmd, adr);
1577 map_write(map, datum, adr);
1580 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1581 adr, map_bankwidth(map),
1582 chip->word_write_time,
1583 chip->word_write_time_max);
1585 xip_enable(map, chip, adr);
1586 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1590 /* check for errors */
1591 status = map_read(map, adr);
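/* Error bits checked below (mask 0x1a): SR.1 (0x02) block locked,
 * SR.3 (0x08) VPP low, SR.4 (0x10) program error. */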
1592 if (map_word_bitsset(map, status, CMD(0x1a))) {
1593 unsigned long chipstatus = MERGESTATUS(status);
1596 map_write(map, CMD(0x50), adr);
1597 map_write(map, CMD(0x70), adr);
1598 xip_enable(map, chip, adr);
1600 if (chipstatus & 0x02) {
1602 } else if (chipstatus & 0x08) {
1603 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1606 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1613 xip_enable(map, chip, adr);
1614 out: DISABLE_VPP(map);
1615 put_chip(map, chip, adr);
1616 mutex_unlock(&chip->mutex);
1621 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1623 struct map_info *map = mtd->priv;
1624 struct cfi_private *cfi = map->fldrv_priv;
1629 chipnum = to >> cfi->chipshift;
1630 ofs = to - (chipnum << cfi->chipshift);
1632 /* If it's not bus-aligned, do the first byte write */
1633 if (ofs & (map_bankwidth(map)-1)) {
1634 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1635 int gap = ofs - bus_ofs;
1639 n = min_t(int, len, map_bankwidth(map)-gap);
1640 datum = map_word_ff(map);
1641 datum = map_word_load_partial(map, datum, buf, gap, n);
1643 ret = do_write_oneword(map, &cfi->chips[chipnum],
1644 bus_ofs, datum, FL_WRITING);
1653 if (ofs >> cfi->chipshift) {
1656 if (chipnum == cfi->numchips)
1661 while(len >= map_bankwidth(map)) {
1662 map_word datum = map_word_load(map, buf);
1664 ret = do_write_oneword(map, &cfi->chips[chipnum],
1665 ofs, datum, FL_WRITING);
1669 ofs += map_bankwidth(map);
1670 buf += map_bankwidth(map);
1671 (*retlen) += map_bankwidth(map);
1672 len -= map_bankwidth(map);
1674 if (ofs >> cfi->chipshift) {
1677 if (chipnum == cfi->numchips)
1682 if (len & (map_bankwidth(map)-1)) {
1685 datum = map_word_ff(map);
1686 datum = map_word_load_partial(map, datum, buf, 0, len);
1688 ret = do_write_oneword(map, &cfi->chips[chipnum],
1689 ofs, datum, FL_WRITING);
1700 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1701 unsigned long adr, const struct kvec **pvec,
1702 unsigned long *pvec_seek, int len)
1704 struct cfi_private *cfi = map->fldrv_priv;
1705 map_word status, write_cmd, datum;
1706 unsigned long cmd_adr;
1707 int ret, wbufsize, word_gap, words;
1708 const struct kvec *vec;
1709 unsigned long vec_seek;
1710 unsigned long initial_adr;
1711 int initial_len = len;
1713 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1716 cmd_adr = adr & ~(wbufsize-1);
1718 /* Sharp LH28F640BF chips need the first address for the
1719 * Page Buffer Program command. See Table 5 of
1720 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1721 if (is_LH28F640BF(cfi))
1724 /* Let's determine this according to the interleave only once */
1725 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1727 mutex_lock(&chip->mutex);
1728 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1730 mutex_unlock(&chip->mutex);
1734 XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1736 xip_disable(map, chip, cmd_adr);
1738 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1739 [...], the device will not accept any more Write to Buffer commands".
1740 So we must check here and reset those bits if they're set. Otherwise
1741 we're just pissing in the wind */
1742 if (chip->state != FL_STATUS) {
1743 map_write(map, CMD(0x70), cmd_adr);
1744 chip->state = FL_STATUS;
1746 status = map_read(map, cmd_adr);
1747 if (map_word_bitsset(map, status, CMD(0x30))) {
1748 xip_enable(map, chip, cmd_adr);
1749 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1750 xip_disable(map, chip, cmd_adr);
1751 map_write(map, CMD(0x50), cmd_adr);
1752 map_write(map, CMD(0x70), cmd_adr);
1755 chip->state = FL_WRITING_TO_BUFFER;
1756 map_write(map, write_cmd, cmd_adr);
1757 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1759 /* Argh. Not ready for write to buffer */
1760 map_word Xstatus = map_read(map, cmd_adr);
1761 map_write(map, CMD(0x70), cmd_adr);
1762 chip->state = FL_STATUS;
1763 status = map_read(map, cmd_adr);
1764 map_write(map, CMD(0x50), cmd_adr);
1765 map_write(map, CMD(0x70), cmd_adr);
1766 xip_enable(map, chip, cmd_adr);
1767 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1768 map->name, Xstatus.x[0], status.x[0]);
1772 /* Figure out the number of words to write */
1773 word_gap = (-adr & (map_bankwidth(map)-1));
1774 words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1778 word_gap = map_bankwidth(map) - word_gap;
1780 datum = map_word_ff(map);
1783 /* Write length of data to come */
1784 map_write(map, CMD(words), cmd_adr );
1788 vec_seek = *pvec_seek;
1790 int n = map_bankwidth(map) - word_gap;
1791 if (n > vec->iov_len - vec_seek)
1792 n = vec->iov_len - vec_seek;
1796 if (!word_gap && len < map_bankwidth(map))
1797 datum = map_word_ff(map);
1799 datum = map_word_load_partial(map, datum,
1800 vec->iov_base + vec_seek,
1805 if (!len || word_gap == map_bankwidth(map)) {
1806 map_write(map, datum, adr);
1807 adr += map_bankwidth(map);
1812 if (vec_seek == vec->iov_len) {
1818 *pvec_seek = vec_seek;
1821 map_write(map, CMD(0xd0), cmd_adr);
1822 chip->state = FL_WRITING;
1824 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1825 initial_adr, initial_len,
1826 chip->buffer_write_time,
1827 chip->buffer_write_time_max);
1829 map_write(map, CMD(0x70), cmd_adr);
1830 chip->state = FL_STATUS;
1831 xip_enable(map, chip, cmd_adr);
1832 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1836 /* check for errors */
1837 status = map_read(map, cmd_adr);
1838 if (map_word_bitsset(map, status, CMD(0x1a))) {
1839 unsigned long chipstatus = MERGESTATUS(status);
1842 map_write(map, CMD(0x50), cmd_adr);
1843 map_write(map, CMD(0x70), cmd_adr);
1844 xip_enable(map, chip, cmd_adr);
1846 if (chipstatus & 0x02) {
1848 } else if (chipstatus & 0x08) {
1849 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1852 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1859 xip_enable(map, chip, cmd_adr);
1860 out: DISABLE_VPP(map);
1861 put_chip(map, chip, cmd_adr);
1862 mutex_unlock(&chip->mutex);
1866 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1867 unsigned long count, loff_t to, size_t *retlen)
1869 struct map_info *map = mtd->priv;
1870 struct cfi_private *cfi = map->fldrv_priv;
1871 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1874 unsigned long ofs, vec_seek, i;
1877 for (i = 0; i < count; i++)
1878 len += vecs[i].iov_len;
1883 chipnum = to >> cfi->chipshift;
1884 ofs = to - (chipnum << cfi->chipshift);
1888 /* We must not cross write block boundaries */
1889 int size = wbufsize - (ofs & (wbufsize-1));
1893 ret = do_write_buffer(map, &cfi->chips[chipnum],
1894 ofs, &vecs, &vec_seek, size);
1902 if (ofs >> cfi->chipshift) {
1905 if (chipnum == cfi->numchips)
1909 /* Be nice and reschedule with the chip in a usable state for other processes. */
1918 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1919 size_t len, size_t *retlen, const u_char *buf)
1923 vec.iov_base = (void *) buf;
1926 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1929 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1930 unsigned long adr, int len, void *thunk)
1932 struct cfi_private *cfi = map->fldrv_priv;
1940 mutex_lock(&chip->mutex);
1941 ret = get_chip(map, chip, adr, FL_ERASING);
1943 mutex_unlock(&chip->mutex);
1947 XIP_INVAL_CACHED_RANGE(map, adr, len);
1949 xip_disable(map, chip, adr);
1951 /* Clear the status register first */
1952 map_write(map, CMD(0x50), adr);
1955 map_write(map, CMD(0x20), adr);
1956 map_write(map, CMD(0xD0), adr);
1957 chip->state = FL_ERASING;
1958 chip->erase_suspended = 0;
1959 chip->in_progress_block_addr = adr;
1960 chip->in_progress_block_mask = ~(len - 1);
1962 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1965 chip->erase_time_max);
1967 map_write(map, CMD(0x70), adr);
1968 chip->state = FL_STATUS;
1969 xip_enable(map, chip, adr);
1970 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1974 /* We've broken this before. It doesn't hurt to be safe */
1975 map_write(map, CMD(0x70), adr);
1976 chip->state = FL_STATUS;
1977 status = map_read(map, adr);
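/* Error bits checked below (mask 0x3a): SR.1 block locked, SR.3 VPP low,
 * SR.4 program error, SR.5 erase error; SR.4 and SR.5 set together
 * indicate a bad command sequence. */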
1979 /* check for errors */
1980 if (map_word_bitsset(map, status, CMD(0x3a))) {
1981 unsigned long chipstatus = MERGESTATUS(status);
1983 /* Reset the error bits */
1984 map_write(map, CMD(0x50), adr);
1985 map_write(map, CMD(0x70), adr);
1986 xip_enable(map, chip, adr);
1988 if ((chipstatus & 0x30) == 0x30) {
1989 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1991 } else if (chipstatus & 0x02) {
1992 /* Protection bit set */
1994 } else if (chipstatus & 0x8) {
1996 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1998 } else if (chipstatus & 0x20 && retries--) {
1999 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2001 put_chip(map, chip, adr);
2002 mutex_unlock(&chip->mutex);
2005 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2012 xip_enable(map, chip, adr);
2013 out: DISABLE_VPP(map);
2014 put_chip(map, chip, adr);
2015 mutex_unlock(&chip->mutex);
2019 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2021 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2025 static void cfi_intelext_sync (struct mtd_info *mtd)
2027 struct map_info *map = mtd->priv;
2028 struct cfi_private *cfi = map->fldrv_priv;
2030 struct flchip *chip;
2033 for (i=0; !ret && i<cfi->numchips; i++) {
2034 chip = &cfi->chips[i];
2036 mutex_lock(&chip->mutex);
2037 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2040 chip->oldstate = chip->state;
2041 chip->state = FL_SYNCING;
2042 /* No need to wake_up() on this state change -
2043 * as the whole point is that nobody can do anything
2044 * with the chip now anyway.
2047 mutex_unlock(&chip->mutex);
2050 /* Unlock the chips again */
2052 for (i--; i >=0; i--) {
2053 chip = &cfi->chips[i];
2055 mutex_lock(&chip->mutex);
2057 if (chip->state == FL_SYNCING) {
2058 chip->state = chip->oldstate;
2059 chip->oldstate = FL_READY;
2062 mutex_unlock(&chip->mutex);
2066 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2067 struct flchip *chip,
2069 int len, void *thunk)
2071 struct cfi_private *cfi = map->fldrv_priv;
2072 int status, ofs_factor = cfi->interleave * cfi->device_type;
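/* In read identifier (0x90) mode, word 2 of a block's address space holds
 * its lock status: bit 0 = locked, bit 1 = locked-down. ofs_factor scales
 * that word offset for the interleave and device width. */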
2075 xip_disable(map, chip, adr+(2*ofs_factor));
2076 map_write(map, CMD(0x90), adr+(2*ofs_factor));
2077 chip->state = FL_JEDEC_QUERY;
2078 status = cfi_read_query(map, adr+(2*ofs_factor));
2079 xip_enable(map, chip, 0);
2083 #ifdef DEBUG_LOCK_BITS
2084 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2085 struct flchip *chip,
2087 int len, void *thunk)
2089 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2090 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2095 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
2096 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported then there is no
	 * need to delay.
	 */
	/*
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
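
/*
 * Illustrative sketch (added, not in the original source): consumers reach
 * this code through the generic MTD lock API rather than calling
 * do_xxlock_oneblock() directly.  Unlocking one erase block before writing
 * to it might look like (ofs is a hypothetical block offset):
 *
 *	if (mtd_unlock(mtd, ofs, mtd->erasesize))
 *		pr_warn("unlock failed\n");
 *
 * On the chip this becomes the 0x60/0xD0 (clear block lock bit) sequence
 * written above; mtd_lock() uses 0x60/0x01 (set block lock bit) instead.
 */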
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}
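
/*
 * Editor's note (added): the MTD core exposes this through mtd_is_locked().
 * A return of 1 means some block in the range reported its lock bit set,
 * 0 means none did.  A minimal check, under those assumptions:
 *
 *	if (mtd_is_locked(mtd, ofs, mtd->erasesize) > 0)
 *		// still locked: unlock before erasing or writing
 */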
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}
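
/*
 * Editor's note (added commentary): the factory and user protection
 * registers are not part of the normal array address space; they are only
 * visible in read-identifier mode.  That is why the code above issues 0x90
 * and parks the chip in FL_JEDEC_QUERY before map_copy_from(), and why the
 * mapping is invalidated both before the read (to avoid stale array-mode
 * data) and after it (so OTP bytes do not linger in the cache).
 */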
static int __xipram
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
static int __xipram
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
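
/*
 * Editor's note (added): locking a group means programming its bit in the
 * protection lock register from 1 to 0 (flash bits can only be cleared by
 * programming, never set back without an erase), so do_otp_lock() builds an
 * all-ones word, clears bit 'grpno', and writes it at the lock register
 * offset 'prot'.  The operation is permanent.
 */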
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;
	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}
	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}
			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}
			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif /* CONFIG_MTD_OTP */
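
/*
 * Illustrative sketch (added, not part of the original source): these
 * handlers back the generic MTD OTP API.  A kernel user could, for example,
 * enumerate the user OTP groups and read a few bytes with something like
 * (buffer sizes are hypothetical):
 *
 *	struct otp_info info[8];
 *	u_char data[16];
 *	size_t retlen;
 *
 *	if (!mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info))
 *		pr_info("%zu bytes of otp_info records\n", retlen);
 *	if (!mtd_read_user_prot_reg(mtd, 0, sizeof(data), &retlen, data))
 *		pr_info("read %zu OTP bytes\n", retlen);
 *
 * Passing a NULL action to cfi_intelext_otp_walk(), as the *_get_*_info
 * wrappers do, fills the buffer with otp_info records instead of data.
 */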
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++){
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
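
/*
 * Editor's note (added): region->lockmap is a per-erase-region bitmap that
 * records each block's lock bit just before suspend, so that
 * cfi_intelext_restore_locks() can, on resume, re-unlock exactly those
 * blocks that were unlocked before the chip lost power and relocked itself.
 */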
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
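
/*
 * Editor's note (added): the lock save/restore above only runs for parts
 * flagged MTD_POWERUP_LOCK whose extended query advertises instant
 * individual block locking (FeatureSupport bit 5), i.e. flash that comes
 * out of reset with every block locked; restore re-unlocks exactly the
 * blocks recorded as unlocked in the lockmap.
 */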
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");