/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i = 11; i < 32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i = 1; i < 8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i = 2; i < 3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n", i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i = 6; i < 16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n", i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped.
           The low 16 bits of EraseRegionInfo hold (number of erase
           blocks - 1), so 0x3e gives region 1 exactly 63 blocks. */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set a single partition, thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it looks like the device IDs are as well.  This
         * table picks up all cases where we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
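
/*
 * For reference, a minimal sketch of how the tables above are consumed.
 * The real walker is cfi_fixup() in drivers/mtd/chips/cfi_util.c; this
 * only illustrates the matching rule: an entry applies when its mfr/id
 * fields equal the probed chip's values or are the CFI_MFR_ANY/CFI_ID_ANY
 * wildcards, and a { 0, 0, NULL } entry terminates the table.
 *
 *	for (f = fixups; f->fixup; f++) {
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id == CFI_ID_ANY || f->id == cfi->id))
 *			f->fixup(mtd);
 *	}
 */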

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy
         * w.r.t. Erase Suspend for their small erase blocks (0x8000).
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

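        /*
         * The extended query table is variable length: read the fixed
         * header first, walk it to see how much optional data the chip
         * advertises, then free the buffer and re-read with the larger
         * size if it didn't all fit (bounded to 4096 bytes below).
         */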
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n", extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
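        /* e.g. two interleaved chips with MaxBufWriteSize = 5 (32-byte
           buffers each) give writebufsize = 2 << 5 = 64 bytes */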

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        } else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

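        /*
         * CFI encodes timeouts as powers of two: typical word/buffer
         * write times are 2^Typ microseconds, typical block erase is
         * 2^Typ milliseconds, and the worst case shifts the typical
         * value left again by the Max field.  For example,
         * WordWriteTimeoutTyp = 4 and WordWriteTimeoutMax = 4 yield
         * 16us typical and 1 << (4 + 4) = 256us maximum.
         */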
        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
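
/*
 * These entry points are, to the best of my reading, resolved by name from
 * the generic CFI/JEDEC probe code (drivers/mtd/chips/gen_probe.c) using
 * the primary vendor command-set ID found in the CFI query structure, so
 * IDs 0x0001, 0x0003 and 0x0200 all land in cfi_cmdset_0001() above.
 */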

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i = 0; i < cfi->cfiq->NumEraseRegions; i++)
                        for (j = 0; j < cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
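                /* e.g. a 16 MiB chip (chipshift = 24) with 4 hardware
                   partitions gives partshift = 24 - 2 = 22, i.e. four
                   4 MiB virtual chips */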

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting the state to FL_SYNCING for a chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if the read/write falls within
                   the erase block currently being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend small EBs, buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real chip
                 * and not per partition.  So let's fight it over in the
                 * partition which currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have a suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}
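
/*
 * A minimal sketch of the calling convention the rest of this driver
 * follows (modeled on its do_* helpers, e.g. do_read_onechip): hold the
 * chip mutex across the get_chip()/put_chip() bracket, and drop it while
 * sleeping or touching the array.
 *
 *	mutex_lock(&chip->mutex);
 *	ret = get_chip(map, chip, cmd_addr, FL_READY);
 *	if (ret) {
 *		mutex_unlock(&chip->mutex);
 *		return ret;
 *	}
 *	... issue commands / read data ...
 *	put_chip(map, chip, cmd_addr);
 *	mutex_unlock(&chip->mutex);
 */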

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

1135 static int __xipram xip_wait_for_operation(
1136                 struct map_info *map, struct flchip *chip,
1137                 unsigned long adr, unsigned int chip_op_time_max)
1138 {
1139         struct cfi_private *cfi = map->fldrv_priv;
1140         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1141         map_word status, OK = CMD(0x80);
1142         unsigned long usec, suspended, start, done;
1143         flstate_t oldstate, newstate;
1144
1145         start = xip_currtime();
1146         usec = chip_op_time_max;
1147         if (usec == 0)
1148                 usec = 500000;
1149         done = 0;
1150
1151         do {
1152                 cpu_relax();
1153                 if (xip_irqpending() && cfip &&
1154                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1155                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1156                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1157                         /*
1158                          * Let's suspend the erase or write operation when
1159                          * supported.  Note that we currently don't try to
1160                          * suspend interleaved chips if there is already
1161                          * another operation suspended (imagine what happens
1162                          * when one chip was already done with the current
1163                          * operation while another chip suspended it, then
1164                          * we resume the whole thing at once).  Yes, it
1165                          * can happen!
1166                          */
1167                         usec -= done;
1168                         map_write(map, CMD(0xb0), adr);
1169                         map_write(map, CMD(0x70), adr);
1170                         suspended = xip_currtime();
1171                         do {
1172                                 if (xip_elapsed_since(suspended) > 100000) {
1173                                         /*
1174                                          * The chip doesn't want to suspend
1175                                          * after waiting for 100 msecs.
1176                                          * This is a critical error but there
1177                                          * is not much we can do here.
1178                                          */
1179                                         return -EIO;
1180                                 }
1181                                 status = map_read(map, adr);
1182                         } while (!map_word_andequal(map, status, OK, OK));
1183
1184                         /* Suspend succeeded */
1185                         oldstate = chip->state;
1186                         if (oldstate == FL_ERASING) {
1187                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1188                                         break;
1189                                 newstate = FL_XIP_WHILE_ERASING;
1190                                 chip->erase_suspended = 1;
1191                         } else {
1192                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1193                                         break;
1194                                 newstate = FL_XIP_WHILE_WRITING;
1195                                 chip->write_suspended = 1;
1196                         }
1197                         chip->state = newstate;
1198                         map_write(map, CMD(0xff), adr);
1199                         (void) map_read(map, adr);
1200                         xip_iprefetch();
1201                         local_irq_enable();
1202                         mutex_unlock(&chip->mutex);
1203                         xip_iprefetch();
1204                         cond_resched();
1205
1206                         /*
1207                          * We're back.  However someone else might have
1208                          * decided to go write to the chip if we are in
1209                          * a suspended erase state.  If so let's wait
1210                          * until it's done.
1211                          */
1212                         mutex_lock(&chip->mutex);
1213                         while (chip->state != newstate) {
1214                                 DECLARE_WAITQUEUE(wait, current);
1215                                 set_current_state(TASK_UNINTERRUPTIBLE);
1216                                 add_wait_queue(&chip->wq, &wait);
1217                                 mutex_unlock(&chip->mutex);
1218                                 schedule();
1219                                 remove_wait_queue(&chip->wq, &wait);
1220                                 mutex_lock(&chip->mutex);
1221                         }
1222                         /* Disallow XIP again */
1223                         local_irq_disable();
1224
1225                         /* Resume the write or erase operation */
1226                         map_write(map, CMD(0xd0), adr);
1227                         map_write(map, CMD(0x70), adr);
1228                         chip->state = oldstate;
1229                         start = xip_currtime();
1230                 } else if (usec >= 1000000/HZ) {
1231                         /*
1232                          * Try to save on CPU power when waiting delay
1233                          * is at least a system timer tick period.
1234                          * No need to be extremely accurate here.
1235                          */
1236                         xip_cpu_idle();
1237                 }
1238                 status = map_read(map, adr);
1239                 done = xip_elapsed_since(start);
1240         } while (!map_word_andequal(map, status, OK, OK)
1241                  && done < usec);
1242
1243         return (done >= usec) ? -ETIME : 0;
1244 }
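
/*
 * For reference, a minimal sketch of the status-poll idiom used above
 * (illustrative only: no suspend handling, no timeout).  Issue Read
 * Status Register (0x70), then wait for SR.7 -- the CMD(0x80) bit --
 * to signal ready.  All helpers are the real ones used in this file:
 *
 *	map_word ready = CMD(0x80), status;
 *
 *	map_write(map, CMD(0x70), adr);
 *	do {
 *		status = map_read(map, adr);
 *	} while (!map_word_andequal(map, status, ready, ready));
 */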
1245
1246 /*
1247  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1248  * the flash is actively programming or erasing since we have to poll for
1249  * the operation to complete anyway.  We can't do that in a generic way with
1250  * a XIP setup so do it before the actual flash operation in this case
1251  * and stub it out from INVAL_CACHE_AND_WAIT.
1252  */
1253 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1254         INVALIDATE_CACHED_RANGE(map, from, size)
1255
1256 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1257         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1258
1259 #else
1260
1261 #define xip_disable(map, chip, adr)
1262 #define xip_enable(map, chip, adr)
1263 #define XIP_INVAL_CACHED_RANGE(x...)
1264 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1265
1266 static int inval_cache_and_wait_for_operation(
1267                 struct map_info *map, struct flchip *chip,
1268                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1269                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1270 {
1271         struct cfi_private *cfi = map->fldrv_priv;
1272         map_word status, status_OK = CMD(0x80);
1273         int chip_state = chip->state;
1274         unsigned int timeo, sleep_time, reset_timeo;
1275
1276         mutex_unlock(&chip->mutex);
1277         if (inval_len)
1278                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1279         mutex_lock(&chip->mutex);
1280
1281         timeo = chip_op_time_max;
1282         if (!timeo)
1283                 timeo = 500000;
1284         reset_timeo = timeo;
1285         sleep_time = chip_op_time / 2;
1286
1287         for (;;) {
1288                 if (chip->state != chip_state) {
1289                         /* Someone's suspended the operation: sleep */
1290                         DECLARE_WAITQUEUE(wait, current);
1291                         set_current_state(TASK_UNINTERRUPTIBLE);
1292                         add_wait_queue(&chip->wq, &wait);
1293                         mutex_unlock(&chip->mutex);
1294                         schedule();
1295                         remove_wait_queue(&chip->wq, &wait);
1296                         mutex_lock(&chip->mutex);
1297                         continue;
1298                 }
1299
1300                 status = map_read(map, cmd_adr);
1301                 if (map_word_andequal(map, status, status_OK, status_OK))
1302                         break;
1303
1304                 if (chip->erase_suspended && chip_state == FL_ERASING) {
1305                         /* Erase suspend occurred while sleeping: reset timeout */
1306                         timeo = reset_timeo;
1307                         chip->erase_suspended = 0;
1308                 }
1309                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1310                         /* Write suspend occurred while sleeping: reset timeout */
1311                         timeo = reset_timeo;
1312                         chip->write_suspended = 0;
1313                 }
1314                 if (!timeo) {
1315                         map_write(map, CMD(0x70), cmd_adr);
1316                         chip->state = FL_STATUS;
1317                         return -ETIME;
1318                 }
1319
1320                 /* OK Still waiting. Drop the lock, wait a while and retry. */
1321                 mutex_unlock(&chip->mutex);
1322                 if (sleep_time >= 1000000/HZ) {
1323                         /*
1324                          * Half of the normal delay still remaining
1325                          * can be performed with a sleeping delay instead
1326                          * of busy waiting.
1327                          */
1328                         msleep(sleep_time/1000);
1329                         timeo -= sleep_time;
1330                         sleep_time = 1000000/HZ;
1331                 } else {
1332                         udelay(1);
1333                         cond_resched();
1334                         timeo--;
1335                 }
1336                 mutex_lock(&chip->mutex);
1337         }
1338
1339         /* Done and happy. */
1340         chip->state = FL_STATUS;
1341         return 0;
1342 }
1343
1344 #endif
1345
1346 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1347         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
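
/*
 * For reference: in a non-XIP build the macro above expands to
 * inval_cache_and_wait_for_operation(map, chip, adr, 0, 0, udelay,
 * udelay_max), i.e. a pure status wait; the zero inval_len means the
 * INVALIDATE_CACHED_RANGE() step is skipped.  In an XIP build it ends
 * up in xip_wait_for_operation() instead.
 */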
1348
1349
1350 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1351 {
1352         unsigned long cmd_addr;
1353         struct cfi_private *cfi = map->fldrv_priv;
1354         int ret = 0;
1355
1356         adr += chip->start;
1357
1358         /* Ensure cmd read/writes are aligned. */
1359         cmd_addr = adr & ~(map_bankwidth(map)-1);
1360
1361         mutex_lock(&chip->mutex);
1362
1363         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1364
1365         if (!ret) {
1366                 if (chip->state != FL_POINT && chip->state != FL_READY)
1367                         map_write(map, CMD(0xff), cmd_addr);
1368
1369                 chip->state = FL_POINT;
1370                 chip->ref_point_counter++;
1371         }
1372         mutex_unlock(&chip->mutex);
1373
1374         return ret;
1375 }
1376
1377 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1378                 size_t *retlen, void **virt, resource_size_t *phys)
1379 {
1380         struct map_info *map = mtd->priv;
1381         struct cfi_private *cfi = map->fldrv_priv;
1382         unsigned long ofs, last_end = 0;
1383         int chipnum;
1384         int ret = 0;
1385
1386         if (!map->virt)
1387                 return -EINVAL;
1388
1389         /* Now lock the chip(s) to POINT state */
1390
1391         /* ofs: offset within the first chip that the first read should start */
1392         chipnum = (from >> cfi->chipshift);
1393         ofs = from - (chipnum << cfi->chipshift);
1394
1395         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1396         if (phys)
1397                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1398
1399         while (len) {
1400                 unsigned long thislen;
1401
1402                 if (chipnum >= cfi->numchips)
1403                         break;
1404
1405                 /* We cannot point across chips that are virtually disjoint */
1406                 if (!last_end)
1407                         last_end = cfi->chips[chipnum].start;
1408                 else if (cfi->chips[chipnum].start != last_end)
1409                         break;
1410
1411                 if ((len + ofs -1) >> cfi->chipshift)
1412                         thislen = (1<<cfi->chipshift) - ofs;
1413                 else
1414                         thislen = len;
1415
1416                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1417                 if (ret)
1418                         break;
1419
1420                 *retlen += thislen;
1421                 len -= thislen;
1422
1423                 ofs = 0;
1424                 last_end += 1 << cfi->chipshift;
1425                 chipnum++;
1426         }
1427         return 0;
1428 }
1429
1430 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1431 {
1432         struct map_info *map = mtd->priv;
1433         struct cfi_private *cfi = map->fldrv_priv;
1434         unsigned long ofs;
1435         int chipnum, err = 0;
1436
1437         /* Now unlock the chip(s) POINT state */
1438
1439         /* ofs: offset within the first chip that the first read should start */
1440         chipnum = (from >> cfi->chipshift);
1441         ofs = from - (chipnum << cfi->chipshift);
1442
1443         while (len && !err) {
1444                 unsigned long thislen;
1445                 struct flchip *chip;
1446
1447                 if (chipnum >= cfi->numchips)
1448                         break;
1449                 chip = &cfi->chips[chipnum];
1450
1451                 if ((len + ofs -1) >> cfi->chipshift)
1452                         thislen = (1<<cfi->chipshift) - ofs;
1453                 else
1454                         thislen = len;
1455
1456                 mutex_lock(&chip->mutex);
1457                 if (chip->state == FL_POINT) {
1458                         chip->ref_point_counter--;
1459                         if (chip->ref_point_counter == 0)
1460                                 chip->state = FL_READY;
1461                 } else {
1462                         printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1463                         err = -EINVAL;
1464                 }
1465
1466                 put_chip(map, chip, chip->start);
1467                 mutex_unlock(&chip->mutex);
1468
1469                 len -= thislen;
1470                 ofs = 0;
1471                 chipnum++;
1472         }
1473
1474         return err;
1475 }
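
/*
 * Usage sketch (illustrative, not part of the driver): the pair above
 * backs mtd_point()/mtd_unpoint(), which hand out a direct pointer into
 * the mapped flash instead of copying.  A hypothetical caller:
 *
 *	size_t retlen;
 *	void *virt;
 *
 *	if (!mtd_point(mtd, ofs, 4096, &retlen, &virt, NULL)) {
 *		... read up to retlen bytes through virt ...
 *		mtd_unpoint(mtd, ofs, retlen);
 *	}
 */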
1476
1477 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1478 {
1479         unsigned long cmd_addr;
1480         struct cfi_private *cfi = map->fldrv_priv;
1481         int ret;
1482
1483         adr += chip->start;
1484
1485         /* Ensure cmd read/writes are aligned. */
1486         cmd_addr = adr & ~(map_bankwidth(map)-1);
1487
1488         mutex_lock(&chip->mutex);
1489         ret = get_chip(map, chip, cmd_addr, FL_READY);
1490         if (ret) {
1491                 mutex_unlock(&chip->mutex);
1492                 return ret;
1493         }
1494
1495         if (chip->state != FL_POINT && chip->state != FL_READY) {
1496                 map_write(map, CMD(0xff), cmd_addr);
1497
1498                 chip->state = FL_READY;
1499         }
1500
1501         map_copy_from(map, buf, adr, len);
1502
1503         put_chip(map, chip, cmd_addr);
1504
1505         mutex_unlock(&chip->mutex);
1506         return 0;
1507 }
1508
1509 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1510 {
1511         struct map_info *map = mtd->priv;
1512         struct cfi_private *cfi = map->fldrv_priv;
1513         unsigned long ofs;
1514         int chipnum;
1515         int ret = 0;
1516
1517         /* ofs: offset within the first chip that the first read should start */
1518         chipnum = (from >> cfi->chipshift);
1519         ofs = from - (chipnum << cfi->chipshift);
1520
1521         while (len) {
1522                 unsigned long thislen;
1523
1524                 if (chipnum >= cfi->numchips)
1525                         break;
1526
1527                 if ((len + ofs -1) >> cfi->chipshift)
1528                         thislen = (1<<cfi->chipshift) - ofs;
1529                 else
1530                         thislen = len;
1531
1532                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1533                 if (ret)
1534                         break;
1535
1536                 *retlen += thislen;
1537                 len -= thislen;
1538                 buf += thislen;
1539
1540                 ofs = 0;
1541                 chipnum++;
1542         }
1543         return ret;
1544 }
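
/*
 * Usage sketch (illustrative): cfi_intelext_read() is where a client's
 * mtd_read() call lands for these chips.  The offset and size are made
 * up for the example:
 *
 *	size_t retlen = 0;
 *	u_char buf[256];
 *	int err = mtd_read(mtd, 0x1000, sizeof(buf), &retlen, buf);
 *
 *	if (err)
 *		pr_err("flash read failed: %d\n", err);
 */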
1545
1546 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1547                                      unsigned long adr, map_word datum, int mode)
1548 {
1549         struct cfi_private *cfi = map->fldrv_priv;
1550         map_word status, write_cmd;
1551         int ret=0;
1552
1553         adr += chip->start;
1554
1555         switch (mode) {
1556         case FL_WRITING:
1557                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1558                 break;
1559         case FL_OTP_WRITE:
1560                 write_cmd = CMD(0xc0);
1561                 break;
1562         default:
1563                 return -EINVAL;
1564         }
1565
1566         mutex_lock(&chip->mutex);
1567         ret = get_chip(map, chip, adr, mode);
1568         if (ret) {
1569                 mutex_unlock(&chip->mutex);
1570                 return ret;
1571         }
1572
1573         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1574         ENABLE_VPP(map);
1575         xip_disable(map, chip, adr);
1576         map_write(map, write_cmd, adr);
1577         map_write(map, datum, adr);
1578         chip->state = mode;
1579
1580         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1581                                    adr, map_bankwidth(map),
1582                                    chip->word_write_time,
1583                                    chip->word_write_time_max);
1584         if (ret) {
1585                 xip_enable(map, chip, adr);
1586                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1587                 goto out;
1588         }
1589
1590         /* check for errors */
1591         status = map_read(map, adr);
1592         if (map_word_bitsset(map, status, CMD(0x1a))) {
1593                 unsigned long chipstatus = MERGESTATUS(status);
1594
1595                 /* reset status */
1596                 map_write(map, CMD(0x50), adr);
1597                 map_write(map, CMD(0x70), adr);
1598                 xip_enable(map, chip, adr);
1599
1600                 if (chipstatus & 0x02) {
1601                         ret = -EROFS;
1602                 } else if (chipstatus & 0x08) {
1603                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1604                         ret = -EIO;
1605                 } else {
1606                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1607                         ret = -EINVAL;
1608                 }
1609
1610                 goto out;
1611         }
1612
1613         xip_enable(map, chip, adr);
1614  out:   DISABLE_VPP(map);
1615         put_chip(map, chip, adr);
1616         mutex_unlock(&chip->mutex);
1617         return ret;
1618 }
1619
1620
1621 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1622 {
1623         struct map_info *map = mtd->priv;
1624         struct cfi_private *cfi = map->fldrv_priv;
1625         int ret = 0;
1626         int chipnum;
1627         unsigned long ofs;
1628
1629         chipnum = to >> cfi->chipshift;
1630         ofs = to - (chipnum << cfi->chipshift);
1631
1632         /* If it's not bus-aligned, do the first byte write */
1633         if (ofs & (map_bankwidth(map)-1)) {
1634                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1635                 int gap = ofs - bus_ofs;
1636                 int n;
1637                 map_word datum;
1638
1639                 n = min_t(int, len, map_bankwidth(map)-gap);
1640                 datum = map_word_ff(map);
1641                 datum = map_word_load_partial(map, datum, buf, gap, n);
1642
1643                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1644                                                bus_ofs, datum, FL_WRITING);
1645                 if (ret)
1646                         return ret;
1647
1648                 len -= n;
1649                 ofs += n;
1650                 buf += n;
1651                 (*retlen) += n;
1652
1653                 if (ofs >> cfi->chipshift) {
1654                         chipnum ++;
1655                         ofs = 0;
1656                         if (chipnum == cfi->numchips)
1657                                 return 0;
1658                 }
1659         }
1660
1661         while(len >= map_bankwidth(map)) {
1662                 map_word datum = map_word_load(map, buf);
1663
1664                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1665                                        ofs, datum, FL_WRITING);
1666                 if (ret)
1667                         return ret;
1668
1669                 ofs += map_bankwidth(map);
1670                 buf += map_bankwidth(map);
1671                 (*retlen) += map_bankwidth(map);
1672                 len -= map_bankwidth(map);
1673
1674                 if (ofs >> cfi->chipshift) {
1675                         chipnum ++;
1676                         ofs = 0;
1677                         if (chipnum == cfi->numchips)
1678                                 return 0;
1679                 }
1680         }
1681
1682         if (len & (map_bankwidth(map)-1)) {
1683                 map_word datum;
1684
1685                 datum = map_word_ff(map);
1686                 datum = map_word_load_partial(map, datum, buf, 0, len);
1687
1688                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1689                                        ofs, datum, FL_WRITING);
1690                 if (ret)
1691                         return ret;
1692
1693                 (*retlen) += len;
1694         }
1695
1696         return 0;
1697 }
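
/*
 * Worked example for the splitting above (hypothetical numbers): with
 * map_bankwidth(map) == 4, writing 10 bytes at ofs 0x102 becomes
 *
 *	head:  bus_ofs = 0x100, gap = 2, n = 2  (one padded word write)
 *	body:  two full words, at 0x104 and 0x108  (8 bytes)
 *	tail:  nothing, since len is now 0
 *
 * Padding with map_word_ff() is safe because programming can only turn
 * 1 bits into 0 bits: writing 0xff leaves the neighbouring bytes
 * untouched.
 */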
1698
1699
1700 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1701                                     unsigned long adr, const struct kvec **pvec,
1702                                     unsigned long *pvec_seek, int len)
1703 {
1704         struct cfi_private *cfi = map->fldrv_priv;
1705         map_word status, write_cmd, datum;
1706         unsigned long cmd_adr;
1707         int ret, wbufsize, word_gap, words;
1708         const struct kvec *vec;
1709         unsigned long vec_seek;
1710         unsigned long initial_adr;
1711         int initial_len = len;
1712
1713         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1714         adr += chip->start;
1715         initial_adr = adr;
1716         cmd_adr = adr & ~(wbufsize-1);
1717
1718         /* Sharp LH28F640BF chips need the first address for the
1719          * Page Buffer Program command. See Table 5 of
1720          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1721         if (is_LH28F640BF(cfi))
1722                 cmd_adr = adr;
1723
1724         /* Let's determine this according to the interleave only once */
1725         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1726
1727         mutex_lock(&chip->mutex);
1728         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1729         if (ret) {
1730                 mutex_unlock(&chip->mutex);
1731                 return ret;
1732         }
1733
1734         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1735         ENABLE_VPP(map);
1736         xip_disable(map, chip, cmd_adr);
1737
1738         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1739            [...], the device will not accept any more Write to Buffer commands".
1740            So we must check here and reset those bits if they're set. Otherwise
1741            we're just pissing in the wind */
1742         if (chip->state != FL_STATUS) {
1743                 map_write(map, CMD(0x70), cmd_adr);
1744                 chip->state = FL_STATUS;
1745         }
1746         status = map_read(map, cmd_adr);
1747         if (map_word_bitsset(map, status, CMD(0x30))) {
1748                 xip_enable(map, chip, cmd_adr);
1749                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1750                 xip_disable(map, chip, cmd_adr);
1751                 map_write(map, CMD(0x50), cmd_adr);
1752                 map_write(map, CMD(0x70), cmd_adr);
1753         }
1754
1755         chip->state = FL_WRITING_TO_BUFFER;
1756         map_write(map, write_cmd, cmd_adr);
1757         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1758         if (ret) {
1759                 /* Argh. Not ready for write to buffer */
1760                 map_word Xstatus = map_read(map, cmd_adr);
1761                 map_write(map, CMD(0x70), cmd_adr);
1762                 chip->state = FL_STATUS;
1763                 status = map_read(map, cmd_adr);
1764                 map_write(map, CMD(0x50), cmd_adr);
1765                 map_write(map, CMD(0x70), cmd_adr);
1766                 xip_enable(map, chip, cmd_adr);
1767                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1768                                 map->name, Xstatus.x[0], status.x[0]);
1769                 goto out;
1770         }
1771
1772         /* Figure out the number of words to write */
1773         word_gap = (-adr & (map_bankwidth(map)-1));
1774         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1775         if (!word_gap) {
1776                 words--;
1777         } else {
1778                 word_gap = map_bankwidth(map) - word_gap;
1779                 adr -= word_gap;
1780                 datum = map_word_ff(map);
1781         }
1782
1783         /* Write length of data to come */
1784         map_write(map, CMD(words), cmd_adr);
1785
1786         /* Write data */
1787         vec = *pvec;
1788         vec_seek = *pvec_seek;
1789         do {
1790                 int n = map_bankwidth(map) - word_gap;
1791                 if (n > vec->iov_len - vec_seek)
1792                         n = vec->iov_len - vec_seek;
1793                 if (n > len)
1794                         n = len;
1795
1796                 if (!word_gap && len < map_bankwidth(map))
1797                         datum = map_word_ff(map);
1798
1799                 datum = map_word_load_partial(map, datum,
1800                                               vec->iov_base + vec_seek,
1801                                               word_gap, n);
1802
1803                 len -= n;
1804                 word_gap += n;
1805                 if (!len || word_gap == map_bankwidth(map)) {
1806                         map_write(map, datum, adr);
1807                         adr += map_bankwidth(map);
1808                         word_gap = 0;
1809                 }
1810
1811                 vec_seek += n;
1812                 if (vec_seek == vec->iov_len) {
1813                         vec++;
1814                         vec_seek = 0;
1815                 }
1816         } while (len);
1817         *pvec = vec;
1818         *pvec_seek = vec_seek;
1819
1820         /* GO GO GO */
1821         map_write(map, CMD(0xd0), cmd_adr);
1822         chip->state = FL_WRITING;
1823
1824         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1825                                    initial_adr, initial_len,
1826                                    chip->buffer_write_time,
1827                                    chip->buffer_write_time_max);
1828         if (ret) {
1829                 map_write(map, CMD(0x70), cmd_adr);
1830                 chip->state = FL_STATUS;
1831                 xip_enable(map, chip, cmd_adr);
1832                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1833                 goto out;
1834         }
1835
1836         /* check for errors */
1837         status = map_read(map, cmd_adr);
1838         if (map_word_bitsset(map, status, CMD(0x1a))) {
1839                 unsigned long chipstatus = MERGESTATUS(status);
1840
1841                 /* reset status */
1842                 map_write(map, CMD(0x50), cmd_adr);
1843                 map_write(map, CMD(0x70), cmd_adr);
1844                 xip_enable(map, chip, cmd_adr);
1845
1846                 if (chipstatus & 0x02) {
1847                         ret = -EROFS;
1848                 } else if (chipstatus & 0x08) {
1849                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1850                         ret = -EIO;
1851                 } else {
1852                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1853                         ret = -EINVAL;
1854                 }
1855
1856                 goto out;
1857         }
1858
1859         xip_enable(map, chip, cmd_adr);
1860  out:   DISABLE_VPP(map);
1861         put_chip(map, chip, cmd_adr);
1862         mutex_unlock(&chip->mutex);
1863         return ret;
1864 }
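
/*
 * Command-sequence recap for do_write_buffer() above:
 *
 *	0xe8/0xe9   Write to Buffer, issued at cmd_adr
 *	(poll)      wait until the buffer is available (SR.7 set)
 *	CMD(words)  word count minus one
 *	(data)      the payload, one bus word at a time
 *	0xd0        confirm, then poll status until programming completes
 *
 * The count written to the chip is "number of words - 1", which is why
 * `words` is decremented when the start address is already aligned.
 */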
1865
1866 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1867                                 unsigned long count, loff_t to, size_t *retlen)
1868 {
1869         struct map_info *map = mtd->priv;
1870         struct cfi_private *cfi = map->fldrv_priv;
1871         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1872         int ret = 0;
1873         int chipnum;
1874         unsigned long ofs, vec_seek, i;
1875         size_t len = 0;
1876
1877         for (i = 0; i < count; i++)
1878                 len += vecs[i].iov_len;
1879
1880         if (!len)
1881                 return 0;
1882
1883         chipnum = to >> cfi->chipshift;
1884         ofs = to - (chipnum << cfi->chipshift);
1885         vec_seek = 0;
1886
1887         do {
1888                 /* We must not cross write block boundaries */
1889                 int size = wbufsize - (ofs & (wbufsize-1));
1890
1891                 if (size > len)
1892                         size = len;
1893                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1894                                       ofs, &vecs, &vec_seek, size);
1895                 if (ret)
1896                         return ret;
1897
1898                 ofs += size;
1899                 (*retlen) += size;
1900                 len -= size;
1901
1902                 if (ofs >> cfi->chipshift) {
1903                         chipnum ++;
1904                         ofs = 0;
1905                         if (chipnum == cfi->numchips)
1906                                 return 0;
1907                 }
1908
1909                 /* Be nice and reschedule with the chip in a usable state for other
1910                    processes. */
1911                 cond_resched();
1912
1913         } while (len);
1914
1915         return 0;
1916 }
1917
1918 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1919                                        size_t len, size_t *retlen, const u_char *buf)
1920 {
1921         struct kvec vec;
1922
1923         vec.iov_base = (void *) buf;
1924         vec.iov_len = len;
1925
1926         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1927 }
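
/*
 * Usage sketch (illustrative): both the word and the buffered write
 * paths sit behind the same client call:
 *
 *	u_char data[64];
 *	size_t retlen;
 *
 *	... fill data ...
 *	err = mtd_write(mtd, 0x1000, sizeof(data), &retlen, data);
 */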
1928
1929 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1930                                       unsigned long adr, int len, void *thunk)
1931 {
1932         struct cfi_private *cfi = map->fldrv_priv;
1933         map_word status;
1934         int retries = 3;
1935         int ret;
1936
1937         adr += chip->start;
1938
1939  retry:
1940         mutex_lock(&chip->mutex);
1941         ret = get_chip(map, chip, adr, FL_ERASING);
1942         if (ret) {
1943                 mutex_unlock(&chip->mutex);
1944                 return ret;
1945         }
1946
1947         XIP_INVAL_CACHED_RANGE(map, adr, len);
1948         ENABLE_VPP(map);
1949         xip_disable(map, chip, adr);
1950
1951         /* Clear the status register first */
1952         map_write(map, CMD(0x50), adr);
1953
1954         /* Now erase */
1955         map_write(map, CMD(0x20), adr);
1956         map_write(map, CMD(0xD0), adr);
1957         chip->state = FL_ERASING;
1958         chip->erase_suspended = 0;
1959         chip->in_progress_block_addr = adr;
1960         chip->in_progress_block_mask = ~(len - 1);
1961
1962         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1963                                    adr, len,
1964                                    chip->erase_time,
1965                                    chip->erase_time_max);
1966         if (ret) {
1967                 map_write(map, CMD(0x70), adr);
1968                 chip->state = FL_STATUS;
1969                 xip_enable(map, chip, adr);
1970                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1971                 goto out;
1972         }
1973
1974         /* We've broken this before. It doesn't hurt to be safe */
1975         map_write(map, CMD(0x70), adr);
1976         chip->state = FL_STATUS;
1977         status = map_read(map, adr);
1978
1979         /* check for errors */
1980         if (map_word_bitsset(map, status, CMD(0x3a))) {
1981                 unsigned long chipstatus = MERGESTATUS(status);
1982
1983                 /* Reset the error bits */
1984                 map_write(map, CMD(0x50), adr);
1985                 map_write(map, CMD(0x70), adr);
1986                 xip_enable(map, chip, adr);
1987
1988                 if ((chipstatus & 0x30) == 0x30) {
1989                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1990                         ret = -EINVAL;
1991                 } else if (chipstatus & 0x02) {
1992                         /* Protection bit set */
1993                         ret = -EROFS;
1994                 } else if (chipstatus & 0x8) {
1995                         /* Voltage */
1996                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1997                         ret = -EIO;
1998                 } else if (chipstatus & 0x20 && retries--) {
1999                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2000                         DISABLE_VPP(map);
2001                         put_chip(map, chip, adr);
2002                         mutex_unlock(&chip->mutex);
2003                         goto retry;
2004                 } else {
2005                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2006                         ret = -EIO;
2007                 }
2008
2009                 goto out;
2010         }
2011
2012         xip_enable(map, chip, adr);
2013  out:   DISABLE_VPP(map);
2014         put_chip(map, chip, adr);
2015         mutex_unlock(&chip->mutex);
2016         return ret;
2017 }
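
/*
 * Usage sketch (illustrative): an MTD client erasing the first block;
 * cfi_varsize_frob() below turns this into do_erase_oneblock() calls:
 *
 *	struct erase_info ei = {
 *		.addr = 0,
 *		.len  = mtd->erasesize,
 *	};
 *
 *	int err = mtd_erase(mtd, &ei);
 */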
2018
2019 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2020 {
2021         return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2022                                 instr->len, NULL);
2023 }
2024
2025 static void cfi_intelext_sync (struct mtd_info *mtd)
2026 {
2027         struct map_info *map = mtd->priv;
2028         struct cfi_private *cfi = map->fldrv_priv;
2029         int i;
2030         struct flchip *chip;
2031         int ret = 0;
2032
2033         for (i=0; !ret && i<cfi->numchips; i++) {
2034                 chip = &cfi->chips[i];
2035
2036                 mutex_lock(&chip->mutex);
2037                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2038
2039                 if (!ret) {
2040                         chip->oldstate = chip->state;
2041                         chip->state = FL_SYNCING;
2042                         /* No need to wake_up() on this state change -
2043                          * as the whole point is that nobody can do anything
2044                          * with the chip now anyway.
2045                          */
2046                 }
2047                 mutex_unlock(&chip->mutex);
2048         }
2049
2050         /* Unlock the chips again */
2051
2052         for (i--; i >=0; i--) {
2053                 chip = &cfi->chips[i];
2054
2055                 mutex_lock(&chip->mutex);
2056
2057                 if (chip->state == FL_SYNCING) {
2058                         chip->state = chip->oldstate;
2059                         chip->oldstate = FL_READY;
2060                         wake_up(&chip->wq);
2061                 }
2062                 mutex_unlock(&chip->mutex);
2063         }
2064 }
2065
2066 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2067                                                 struct flchip *chip,
2068                                                 unsigned long adr,
2069                                                 int len, void *thunk)
2070 {
2071         struct cfi_private *cfi = map->fldrv_priv;
2072         int status, ofs_factor = cfi->interleave * cfi->device_type;
2073
2074         adr += chip->start;
2075         xip_disable(map, chip, adr+(2*ofs_factor));
2076         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2077         chip->state = FL_JEDEC_QUERY;
2078         status = cfi_read_query(map, adr+(2*ofs_factor));
2079         xip_enable(map, chip, 0);
2080         return status;
2081 }
2082
2083 #ifdef DEBUG_LOCK_BITS
2084 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2085                                                 struct flchip *chip,
2086                                                 unsigned long adr,
2087                                                 int len, void *thunk)
2088 {
2089         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2090                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2091         return 0;
2092 }
2093 #endif
2094
2095 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2096 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2097
2098 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2099                                        unsigned long adr, int len, void *thunk)
2100 {
2101         struct cfi_private *cfi = map->fldrv_priv;
2102         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2103         int mdelay;
2104         int ret;
2105
2106         adr += chip->start;
2107
2108         mutex_lock(&chip->mutex);
2109         ret = get_chip(map, chip, adr, FL_LOCKING);
2110         if (ret) {
2111                 mutex_unlock(&chip->mutex);
2112                 return ret;
2113         }
2114
2115         ENABLE_VPP(map);
2116         xip_disable(map, chip, adr);
2117
2118         map_write(map, CMD(0x60), adr);
2119         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2120                 map_write(map, CMD(0x01), adr);
2121                 chip->state = FL_LOCKING;
2122         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2123                 map_write(map, CMD(0xD0), adr);
2124                 chip->state = FL_UNLOCKING;
2125         } else
2126                 BUG();
2127
2128         /*
2129          * If Instant Individual Block Locking is supported then there is
2130          * no need to delay.
2131          */
2132         /*
2133          * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2134          * let's use a max of 1.5 seconds (1500ms) as the timeout.
2135          *
2136          * See "Clear Block Lock-Bits Time" on page 40 in
2137          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2138          * from February 2003
2139          */
2140         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2141
2142         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2143         if (ret) {
2144                 map_write(map, CMD(0x70), adr);
2145                 chip->state = FL_STATUS;
2146                 xip_enable(map, chip, adr);
2147                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2148                 goto out;
2149         }
2150
2151         xip_enable(map, chip, adr);
2152  out:   DISABLE_VPP(map);
2153         put_chip(map, chip, adr);
2154         mutex_unlock(&chip->mutex);
2155         return ret;
2156 }
2157
2158 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2159 {
2160         int ret;
2161
2162 #ifdef DEBUG_LOCK_BITS
2163         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2164                __func__, ofs, len);
2165         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2166                 ofs, len, NULL);
2167 #endif
2168
2169         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2170                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2171
2172 #ifdef DEBUG_LOCK_BITS
2173         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2174                __func__, ret);
2175         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2176                 ofs, len, NULL);
2177 #endif
2178
2179         return ret;
2180 }
2181
2182 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2183 {
2184         int ret;
2185
2186 #ifdef DEBUG_LOCK_BITS
2187         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2188                __func__, ofs, len);
2189         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2190                 ofs, len, NULL);
2191 #endif
2192
2193         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2194                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2195
2196 #ifdef DEBUG_LOCK_BITS
2197         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2198                __func__, ret);
2199         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2200                 ofs, len, NULL);
2201 #endif
2202
2203         return ret;
2204 }
2205
2206 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2207                                   uint64_t len)
2208 {
2209         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2210                                 ofs, len, NULL) ? 1 : 0;
2211 }
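
/*
 * Usage sketch (illustrative): the three handlers above back mtd_lock(),
 * mtd_unlock() and mtd_is_locked().  For example, unlocking one erase
 * block before writing to it:
 *
 *	if (mtd_is_locked(mtd, ofs, mtd->erasesize) > 0)
 *		mtd_unlock(mtd, ofs, mtd->erasesize);
 */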
2212
2213 #ifdef CONFIG_MTD_OTP
2214
2215 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2216                         u_long data_offset, u_char *buf, u_int size,
2217                         u_long prot_offset, u_int groupno, u_int groupsize);
2218
2219 static int __xipram
2220 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2221             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2222 {
2223         struct cfi_private *cfi = map->fldrv_priv;
2224         int ret;
2225
2226         mutex_lock(&chip->mutex);
2227         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2228         if (ret) {
2229                 mutex_unlock(&chip->mutex);
2230                 return ret;
2231         }
2232
2233         /* let's ensure we're not reading back cached data from array mode */
2234         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2235
2236         xip_disable(map, chip, chip->start);
2237         if (chip->state != FL_JEDEC_QUERY) {
2238                 map_write(map, CMD(0x90), chip->start);
2239                 chip->state = FL_JEDEC_QUERY;
2240         }
2241         map_copy_from(map, buf, chip->start + offset, size);
2242         xip_enable(map, chip, chip->start);
2243
2244         /* then ensure we don't keep OTP data in the cache */
2245         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2246
2247         put_chip(map, chip, chip->start);
2248         mutex_unlock(&chip->mutex);
2249         return 0;
2250 }
2251
2252 static int
2253 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2254              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2255 {
2256         int ret;
2257
2258         while (size) {
2259                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2260                 int gap = offset - bus_ofs;
2261                 int n = min_t(int, size, map_bankwidth(map)-gap);
2262                 map_word datum = map_word_ff(map);
2263
2264                 datum = map_word_load_partial(map, datum, buf, gap, n);
2265                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2266                 if (ret)
2267                         return ret;
2268
2269                 offset += n;
2270                 buf += n;
2271                 size -= n;
2272         }
2273
2274         return 0;
2275 }
2276
2277 static int
2278 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2279             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2280 {
2281         struct cfi_private *cfi = map->fldrv_priv;
2282         map_word datum;
2283
2284         /* make sure area matches group boundaries */
2285         if (size != grpsz)
2286                 return -EXDEV;
2287
2288         datum = map_word_ff(map);
2289         datum = map_word_clr(map, datum, CMD(1 << grpno));
2290         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2291 }
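
/*
 * Note on the datum built above: each OTP group owns one lock bit in
 * the protection register, and programming that bit from 1 to 0 locks
 * the group permanently.  map_word_clr() clears only bit `grpno` in an
 * all-ones word, so the single word write locks exactly one group.
 */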
2292
2293 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2294                                  size_t *retlen, u_char *buf,
2295                                  otp_op_t action, int user_regs)
2296 {
2297         struct map_info *map = mtd->priv;
2298         struct cfi_private *cfi = map->fldrv_priv;
2299         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2300         struct flchip *chip;
2301         struct cfi_intelext_otpinfo *otp;
2302         u_long devsize, reg_prot_offset, data_offset;
2303         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2304         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2305         int ret;
2306
2307         *retlen = 0;
2308
2309         /* Check that we actually have some OTP registers */
2310         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2311                 return -ENODATA;
2312
2313         /* we need real chips here not virtual ones */
2314         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2315         chip_step = devsize >> cfi->chipshift;
2316         chip_num = 0;
2317
2318         /* Some chips have OTP located in the _top_ partition only.
2319            For example: Intel 28F256L18T (T means top-parameter device) */
2320         if (cfi->mfr == CFI_MFR_INTEL) {
2321                 switch (cfi->id) {
2322                 case 0x880b:
2323                 case 0x880c:
2324                 case 0x880d:
2325                         chip_num = chip_step - 1;
2326                 }
2327         }
2328
2329         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2330                 chip = &cfi->chips[chip_num];
2331                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2332
2333                 /* first OTP region */
2334                 field = 0;
2335                 reg_prot_offset = extp->ProtRegAddr;
2336                 reg_fact_groups = 1;
2337                 reg_fact_size = 1 << extp->FactProtRegSize;
2338                 reg_user_groups = 1;
2339                 reg_user_size = 1 << extp->UserProtRegSize;
2340
2341                 while (len > 0) {
2342                         /* flash geometry fixup */
2343                         data_offset = reg_prot_offset + 1;
2344                         data_offset *= cfi->interleave * cfi->device_type;
2345                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2346                         reg_fact_size *= cfi->interleave;
2347                         reg_user_size *= cfi->interleave;
2348
2349                         if (user_regs) {
2350                                 groups = reg_user_groups;
2351                                 groupsize = reg_user_size;
2352                                 /* skip over factory reg area */
2353                                 groupno = reg_fact_groups;
2354                                 data_offset += reg_fact_groups * reg_fact_size;
2355                         } else {
2356                                 groups = reg_fact_groups;
2357                                 groupsize = reg_fact_size;
2358                                 groupno = 0;
2359                         }
2360
2361                         while (len > 0 && groups > 0) {
2362                                 if (!action) {
2363                                         /*
2364                                          * Special case: if action is NULL
2365                                          * we fill buf with otp_info records.
2366                                          */
2367                                         struct otp_info *otpinfo;
2368                                         map_word lockword;
2369                                         if (len <= sizeof(struct otp_info))
2370                                                 return -ENOSPC;
2371                                         len -= sizeof(struct otp_info);
2372                                         ret = do_otp_read(map, chip,
2373                                                           reg_prot_offset,
2374                                                           (u_char *)&lockword,
2375                                                           map_bankwidth(map),
2376                                                           0, 0,  0);
2377                                         if (ret)
2378                                                 return ret;
2379                                         otpinfo = (struct otp_info *)buf;
2380                                         otpinfo->start = from;
2381                                         otpinfo->length = groupsize;
2382                                         otpinfo->locked =
2383                                            !map_word_bitsset(map, lockword,
2384                                                              CMD(1 << groupno));
2385                                         from += groupsize;
2386                                         buf += sizeof(*otpinfo);
2387                                         *retlen += sizeof(*otpinfo);
2388                                 } else if (from >= groupsize) {
2389                                         from -= groupsize;
2390                                         data_offset += groupsize;
2391                                 } else {
2392                                         int size = groupsize;
2393                                         data_offset += from;
2394                                         size -= from;
2395                                         from = 0;
2396                                         if (size > len)
2397                                                 size = len;
2398                                         ret = action(map, chip, data_offset,
2399                                                      buf, size, reg_prot_offset,
2400                                                      groupno, groupsize);
2401                                         if (ret < 0)
2402                                                 return ret;
2403                                         buf += size;
2404                                         len -= size;
2405                                         *retlen += size;
2406                                         data_offset += size;
2407                                 }
2408                                 groupno++;
2409                                 groups--;
2410                         }
2411
2412                         /* next OTP region */
2413                         if (++field == extp->NumProtectionFields)
2414                                 break;
2415                         reg_prot_offset = otp->ProtRegAddr;
2416                         reg_fact_groups = otp->FactGroups;
2417                         reg_fact_size = 1 << otp->FactProtRegSize;
2418                         reg_user_groups = otp->UserGroups;
2419                         reg_user_size = 1 << otp->UserProtRegSize;
2420                         otp++;
2421                 }
2422         }
2423
2424         return 0;
2425 }
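
/*
 * Geometry fixup example (hypothetical numbers): with two interleaved
 * x16 chips, cfi->interleave == 2 and cfi->device_type == 2, so a
 * per-chip protection register address of 0x80 lands at
 *
 *	reg_prot_offset = 0x80 * 2 * 2 = 0x200
 *
 * in map address space, and every register byte occupies
 * cfi->interleave bus bytes, hence the reg_fact_size/reg_user_size
 * multiplications in the walk above.
 */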
2426
2427 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2428                                            size_t len, size_t *retlen,
2429                                            u_char *buf)
2430 {
2431         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2432                                      buf, do_otp_read, 0);
2433 }
2434
2435 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2436                                            size_t len, size_t *retlen,
2437                                            u_char *buf)
2438 {
2439         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2440                                      buf, do_otp_read, 1);
2441 }
2442
2443 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2444                                             size_t len, size_t *retlen,
2445                                             u_char *buf)
2446 {
2447         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2448                                      buf, do_otp_write, 1);
2449 }
2450
2451 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2452                                            loff_t from, size_t len)
2453 {
2454         size_t retlen;
2455         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2456                                      NULL, do_otp_lock, 1);
2457 }
2458
2459 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2460                                            size_t *retlen, struct otp_info *buf)
2461
2462 {
2463         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2464                                      NULL, 0);
2465 }
2466
2467 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2468                                            size_t *retlen, struct otp_info *buf)
2469 {
2470         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2471                                      NULL, 1);
2472 }
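
/*
 * Usage sketch (illustrative): the wrappers above implement the MTD OTP
 * interface, e.g.:
 *
 *	struct otp_info info[4];
 *	u_char buf[16];
 *	size_t retlen;
 *
 *	mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
 *	mtd_read_user_prot_reg(mtd, 0, sizeof(buf), &retlen, buf);
 */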
2473
2474 #endif
2475
2476 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2477 {
2478         struct mtd_erase_region_info *region;
2479         int block, status, i;
2480         unsigned long adr;
2481         size_t len;
2482
2483         for (i = 0; i < mtd->numeraseregions; i++) {
2484                 region = &mtd->eraseregions[i];
2485                 if (!region->lockmap)
2486                         continue;
2487
2488                 for (block = 0; block < region->numblocks; block++){
2489                         len = region->erasesize;
2490                         adr = region->offset + block * len;
2491
2492                         status = cfi_varsize_frob(mtd,
2493                                         do_getlockstatus_oneblock, adr, len, NULL);
2494                         if (status)
2495                                 set_bit(block, region->lockmap);
2496                         else
2497                                 clear_bit(block, region->lockmap);
2498                 }
2499         }
2500 }
2501
2502 static int cfi_intelext_suspend(struct mtd_info *mtd)
2503 {
2504         struct map_info *map = mtd->priv;
2505         struct cfi_private *cfi = map->fldrv_priv;
2506         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2507         int i;
2508         struct flchip *chip;
2509         int ret = 0;
2510
2511         if ((mtd->flags & MTD_POWERUP_LOCK)
2512             && extp && (extp->FeatureSupport & (1 << 5)))
2513                 cfi_intelext_save_locks(mtd);
2514
2515         for (i=0; !ret && i<cfi->numchips; i++) {
2516                 chip = &cfi->chips[i];
2517
2518                 mutex_lock(&chip->mutex);
2519
2520                 switch (chip->state) {
2521                 case FL_READY:
2522                 case FL_STATUS:
2523                 case FL_CFI_QUERY:
2524                 case FL_JEDEC_QUERY:
2525                         if (chip->oldstate == FL_READY) {
2526                                 /* place the chip in a known state before suspend */
2527                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2528                                 chip->oldstate = chip->state;
2529                                 chip->state = FL_PM_SUSPENDED;
2530                                 /* No need to wake_up() on this state change -
2531                                  * as the whole point is that nobody can do anything
2532                                  * with the chip now anyway.
2533                                  */
2534                         } else {
2535                                 /* There seems to be an operation pending. We must wait for it. */
2536                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2537                                 ret = -EAGAIN;
2538                         }
2539                         break;
2540                 default:
2541                         /* Should we actually wait? Once upon a time these routines weren't
2542                            allowed to. Or should we return -EAGAIN, because the upper layers
2543                            ought to have already shut down anything which was using the device
2544                            anyway? The latter for now. */
2545                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2546                         ret = -EAGAIN;  /* fall through */
2547                 case FL_PM_SUSPENDED:
2548                         break;
2549                 }
2550                 mutex_unlock(&chip->mutex);
2551         }
2552
2553         /* Unlock the chips again */
2554
2555         if (ret) {
2556                 for (i--; i >=0; i--) {
2557                         chip = &cfi->chips[i];
2558
2559                         mutex_lock(&chip->mutex);
2560
2561                         if (chip->state == FL_PM_SUSPENDED) {
2562                                 /* No need to force it into a known state here,
2563                                    because we're returning failure, and it didn't
2564                                    get power cycled */
2565                                 chip->state = chip->oldstate;
2566                                 chip->oldstate = FL_READY;
2567                                 wake_up(&chip->wq);
2568                         }
2569                         mutex_unlock(&chip->mutex);
2570                 }
2571         }
2572
2573         return ret;
2574 }
2575
2576 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2577 {
2578         struct mtd_erase_region_info *region;
2579         int block, i;
2580         unsigned long adr;
2581         size_t len;
2582
2583         for (i = 0; i < mtd->numeraseregions; i++) {
2584                 region = &mtd->eraseregions[i];
2585                 if (!region->lockmap)
2586                         continue;
2587
2588                 for_each_clear_bit(block, region->lockmap, region->numblocks) {
2589                         len = region->erasesize;
2590                         adr = region->offset + block * len;
2591                         cfi_intelext_unlock(mtd, adr, len);
2592                 }
2593         }
2594 }
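
/*
 * For reference: cfi_intelext_save_locks() records each block's lock
 * bit in region->lockmap at suspend time.  A MTD_POWERUP_LOCK chip
 * comes back from a power cycle with every block locked, so the walk
 * above only has to unlock the blocks whose saved bit is clear.
 */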
2595
2596 static void cfi_intelext_resume(struct mtd_info *mtd)
2597 {
2598         struct map_info *map = mtd->priv;
2599         struct cfi_private *cfi = map->fldrv_priv;
2600         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2601         int i;
2602         struct flchip *chip;
2603
2604         for (i=0; i<cfi->numchips; i++) {
2605
2606                 chip = &cfi->chips[i];
2607
2608                 mutex_lock(&chip->mutex);
2609
2610                 /* Go to known state. Chip may have been power cycled */
2611                 if (chip->state == FL_PM_SUSPENDED) {
2612                         /* Refresh LH28F640BF Partition Config. Register */
2613                         fixup_LH28F640BF(mtd);
2614                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2615                         chip->oldstate = chip->state = FL_READY;
2616                         wake_up(&chip->wq);
2617                 }
2618
2619                 mutex_unlock(&chip->mutex);
2620         }
2621
2622         if ((mtd->flags & MTD_POWERUP_LOCK)
2623             && extp && (extp->FeatureSupport & (1 << 5)))
2624                 cfi_intelext_restore_locks(mtd);
2625 }
2626
2627 static int cfi_intelext_reset(struct mtd_info *mtd)
2628 {
2629         struct map_info *map = mtd->priv;
2630         struct cfi_private *cfi = map->fldrv_priv;
2631         int i, ret;
2632
2633         for (i=0; i < cfi->numchips; i++) {
2634                 struct flchip *chip = &cfi->chips[i];
2635
2636                 /* force the completion of any ongoing operation
2637                    and switch to array mode so any bootloader in
2638                    flash is accessible for soft reboot. */
2639                 mutex_lock(&chip->mutex);
2640                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2641                 if (!ret) {
2642                         map_write(map, CMD(0xff), chip->start);
2643                         chip->state = FL_SHUTDOWN;
2644                         put_chip(map, chip, chip->start);
2645                 }
2646                 mutex_unlock(&chip->mutex);
2647         }
2648
2649         return 0;
2650 }
2651
2652 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2653                                void *v)
2654 {
2655         struct mtd_info *mtd;
2656
2657         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2658         cfi_intelext_reset(mtd);
2659         return NOTIFY_DONE;
2660 }
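
/*
 * For reference: this notifier is hooked into the kernel reboot chain
 * (the matching unregister_reboot_notifier() call is in
 * cfi_intelext_destroy() below), so the chips are put back into array
 * mode before a soft reboot hands control to a bootloader in flash.
 */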
2661
2662 static void cfi_intelext_destroy(struct mtd_info *mtd)
2663 {
2664         struct map_info *map = mtd->priv;
2665         struct cfi_private *cfi = map->fldrv_priv;
2666         struct mtd_erase_region_info *region;
2667         int i;
2668         cfi_intelext_reset(mtd);
2669         unregister_reboot_notifier(&mtd->reboot_notifier);
2670         kfree(cfi->cmdset_priv);
2671         kfree(cfi->cfiq);
2672         kfree(cfi->chips[0].priv);
2673         kfree(cfi);
2674         for (i = 0; i < mtd->numeraseregions; i++) {
2675                 region = &mtd->eraseregions[i];
2676                 kfree(region->lockmap);
2677         }
2678         kfree(mtd->eraseregions);
2679 }
2680
2681 MODULE_LICENSE("GPL");
2682 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2683 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2684 MODULE_ALIAS("cfi_cmdset_0003");
2685 MODULE_ALIAS("cfi_cmdset_0200");