/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
#define AT49BV640D      0x02de

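/*
 * The values above are JEDEC manufacturer and device IDs (0x0089 and
 * 0x0020 are the Intel and ST manufacturer codes); most of them are
 * used when matching specific chips in the fixup tables further down.
 */
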
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table is to pick all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
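
/*
 * A sketch of how these tables are consumed: the generic cfi_fixup()
 * helper (defined outside this file) walks a table and, for every
 * entry whose mfr/id fields match the probed chip (with CFI_MFR_ANY
 * and CFI_ID_ANY acting as wildcards), invokes the fixup hook.
 * Roughly:
 *
 *      for (f = fixups; f->fixup; f++)
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *                      f->fixup(mtd, f->param);
 */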

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

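        /*
         * The extended query table has a variable-size tail (OTP fields,
         * burst read info, partition regions) whose total length is only
         * known once it has been parsed.  Read the fixed-size part first,
         * and whenever a newly discovered field falls outside the current
         * buffer, free it and retry from "again" with a larger size.
         */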
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

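        /*
         * The CFI timeout fields are log2-encoded: a "Typ" value gives a
         * typical time of 1<<Typ (in us for word/buffer writes, in ms for
         * block erase, hence the 1000<< below), and a "Max" value gives a
         * worst case of typical << Max.  E.g. WordWriteTimeoutTyp = 4 and
         * WordWriteTimeoutMax = 3 mean 16 us typical, 128 us worst case.
         */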
        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

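        /*
         * Per the CFI spec, each 32-bit EraseRegionInfo entry encodes the
         * region's block count minus one in its low 16 bits and the block
         * size in units of 256 bytes in its high 16 bits, hence the
         * shift/mask arithmetic below.
         */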
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if(mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
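                /*
                 * chipshift is log2 of the chip size, so carving the chip
                 * into numparts equal partitions subtracts __ffs(numparts),
                 * which equals log2(numparts) for the expected power-of-two
                 * partition counts.  E.g. a 16 MiB chip (chipshift = 24)
                 * with 4 partitions gives partshift = 22: four virtual
                 * chips of 4 MiB each.
                 */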
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
                           || mode == FL_OTP_WRITE || mode == FL_SHUTDOWN)) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real
                 * chip and not per partition.  So let's fight it over in
                 * the partition which currently has authority on the
                 * operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
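                /*
                 * A concrete example of the rules above: partition B wants
                 * to write while partition A (on the same physical chip)
                 * owns a running erase.  B finds contender == A below,
                 * takes A's mutex and calls chip_ready() on A, which
                 * either suspends A's erase (making B the new owner) or
                 * puts B to sleep until A is done.
                 */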
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);

                        if (ret == -EAGAIN) {
                                spin_unlock(contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        spin_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
        xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time, reset_timeo;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        timeo = chip_op_time_max;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;
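
        /*
         * Polling strategy: sleep for half the typical operation time up
         * front (the operation will likely complete around then), then
         * back off to one-tick sleeps, falling back to 1 us busy-wait
         * polls for operations shorter than a timer tick.
         */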
1186
1187         for (;;) {
1188                 status = map_read(map, cmd_adr);
1189                 if (map_word_andequal(map, status, status_OK, status_OK))
1190                         break;
1191
1192                 if (!timeo) {
1193                         map_write(map, CMD(0x70), cmd_adr);
1194                         chip->state = FL_STATUS;
1195                         return -ETIME;
1196                 }
1197
1198                 /* OK Still waiting. Drop the lock, wait a while and retry. */
1199                 spin_unlock(chip->mutex);
1200                 if (sleep_time >= 1000000/HZ) {
1201                         /*
1202                          * Half of the normal delay still remaining
1203                          * can be performed with a sleeping delay instead
1204                          * of busy waiting.
1205                          */
1206                         msleep(sleep_time/1000);
1207                         timeo -= sleep_time;
1208                         sleep_time = 1000000/HZ;
1209                 } else {
1210                         udelay(1);
1211                         cond_resched();
1212                         timeo--;
1213                 }
1214                 spin_lock(chip->mutex);
1215
1216                 while (chip->state != chip_state) {
1217                         /* Someone's suspended the operation: sleep */
1218                         DECLARE_WAITQUEUE(wait, current);
1219                         set_current_state(TASK_UNINTERRUPTIBLE);
1220                         add_wait_queue(&chip->wq, &wait);
1221                         spin_unlock(chip->mutex);
1222                         schedule();
1223                         remove_wait_queue(&chip->wq, &wait);
1224                         spin_lock(chip->mutex);
1225                 }
1226                 if (chip->erase_suspended || chip->write_suspended)  {
1227                         /* A suspend occurred while we slept: reset the timeout */
1228                         timeo = reset_timeo;
1229                         chip->erase_suspended = 0;
1230                         chip->write_suspended = 0;
1231                 }
1232         }
1233
1234         /* Done and happy. */
1235         chip->state = FL_STATUS;
1236         return 0;
1237 }
1238
1239 #endif
1240
1241 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1242         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1243
1244
1245 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1246 {
1247         unsigned long cmd_addr;
1248         struct cfi_private *cfi = map->fldrv_priv;
1249         int ret = 0;
1250
1251         adr += chip->start;
1252
1253         /* Ensure cmd read/writes are aligned. */
1254         cmd_addr = adr & ~(map_bankwidth(map)-1);
1255
1256         spin_lock(chip->mutex);
1257
1258         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1259
1260         if (!ret) {
1261                 if (chip->state != FL_POINT && chip->state != FL_READY)
1262                         map_write(map, CMD(0xff), cmd_addr);
1263
1264                 chip->state = FL_POINT;
1265                 chip->ref_point_counter++;
1266         }
1267         spin_unlock(chip->mutex);
1268
1269         return ret;
1270 }
1271
1272 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1273                 size_t *retlen, void **virt, resource_size_t *phys)
1274 {
1275         struct map_info *map = mtd->priv;
1276         struct cfi_private *cfi = map->fldrv_priv;
1277         unsigned long ofs, last_end = 0;
1278         int chipnum;
1279         int ret = 0;
1280
1281         if (!map->virt || (from + len > mtd->size))
1282                 return -EINVAL;
1283
1284         /* Now lock the chip(s) to POINT state */
1285
1286         /* ofs: offset within the first chip at which the first read should start */
1287         chipnum = (from >> cfi->chipshift);
1288         ofs = from - (chipnum << cfi->chipshift);
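        /*
         * Worked example (assuming two 16MiB chips, i.e. chipshift == 24):
         * from == 0x1234567 gives chipnum == 1 and ofs == 0x234567, so the
         * access starts 0x234567 bytes into the second chip.
         */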
1289
1290         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1291         *retlen = 0;
1292         if (phys)
1293                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1294
1295         while (len) {
1296                 unsigned long thislen;
1297
1298                 if (chipnum >= cfi->numchips)
1299                         break;
1300
1301                 /* We cannot point across chips that are virtually disjoint */
1302                 if (!last_end)
1303                         last_end = cfi->chips[chipnum].start;
1304                 else if (cfi->chips[chipnum].start != last_end)
1305                         break;
1306
1307                 if ((len + ofs - 1) >> cfi->chipshift)
1308                         thislen = (1<<cfi->chipshift) - ofs;
1309                 else
1310                         thislen = len;
1311
1312                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1313                 if (ret)
1314                         break;
1315
1316                 *retlen += thislen;
1317                 len -= thislen;
1318
1319                 ofs = 0;
1320                 last_end += 1 << cfi->chipshift;
1321                 chipnum++;
1322         }
1323         return 0;
1324 }
1325
1326 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1327 {
1328         struct map_info *map = mtd->priv;
1329         struct cfi_private *cfi = map->fldrv_priv;
1330         unsigned long ofs;
1331         int chipnum;
1332
1333         /* Now take the chip(s) out of POINT state */
1334
1335         /* ofs: offset within the first chip at which the first access starts */
1336         chipnum = (from >> cfi->chipshift);
1337         ofs = from - (chipnum << cfi->chipshift);
1338
1339         while (len) {
1340                 unsigned long thislen;
1341                 struct flchip *chip;
1342
1343                 if (chipnum >= cfi->numchips)
1344                         break;
1345                 chip = &cfi->chips[chipnum];
1346
1347                 if ((len + ofs - 1) >> cfi->chipshift)
1348                         thislen = (1<<cfi->chipshift) - ofs;
1349                 else
1350                         thislen = len;
1351
1352                 spin_lock(chip->mutex);
1353                 if (chip->state == FL_POINT) {
1354                         chip->ref_point_counter--;
1355                         if (chip->ref_point_counter == 0)
1356                                 chip->state = FL_READY;
1357                 } else
1358                         printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1359
1360                 put_chip(map, chip, chip->start);
1361                 spin_unlock(chip->mutex);
1362
1363                 len -= thislen;
1364                 ofs = 0;
1365                 chipnum++;
1366         }
1367 }
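/*
 * Example (illustrative sketch only, not part of this driver; error
 * handling trimmed): an MTD user typically pairs point/unpoint around a
 * direct read and falls back to a normal read when pointing is
 * unavailable or fails:
 *
 *	void *virt;
 *	size_t retlen;
 *
 *	if (mtd->point && !mtd->point(mtd, from, len, &retlen, &virt, NULL)) {
 *		memcpy(dest, virt, retlen);
 *		mtd->unpoint(mtd, from, retlen);
 *	} else {
 *		mtd->read(mtd, from, len, &retlen, dest);
 *	}
 */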
1368
1369 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1370 {
1371         unsigned long cmd_addr;
1372         struct cfi_private *cfi = map->fldrv_priv;
1373         int ret;
1374
1375         adr += chip->start;
1376
1377         /* Ensure cmd read/writes are aligned. */
1378         cmd_addr = adr & ~(map_bankwidth(map)-1);
1379
1380         spin_lock(chip->mutex);
1381         ret = get_chip(map, chip, cmd_addr, FL_READY);
1382         if (ret) {
1383                 spin_unlock(chip->mutex);
1384                 return ret;
1385         }
1386
1387         if (chip->state != FL_POINT && chip->state != FL_READY) {
1388                 map_write(map, CMD(0xff), cmd_addr);
1389
1390                 chip->state = FL_READY;
1391         }
1392
1393         map_copy_from(map, buf, adr, len);
1394
1395         put_chip(map, chip, cmd_addr);
1396
1397         spin_unlock(chip->mutex);
1398         return 0;
1399 }
1400
1401 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1402 {
1403         struct map_info *map = mtd->priv;
1404         struct cfi_private *cfi = map->fldrv_priv;
1405         unsigned long ofs;
1406         int chipnum;
1407         int ret = 0;
1408
1409         /* ofs: offset within the first chip at which the first read should start */
1410         chipnum = (from >> cfi->chipshift);
1411         ofs = from - (chipnum << cfi->chipshift);
1412
1413         *retlen = 0;
1414
1415         while (len) {
1416                 unsigned long thislen;
1417
1418                 if (chipnum >= cfi->numchips)
1419                         break;
1420
1421                 if ((len + ofs - 1) >> cfi->chipshift)
1422                         thislen = (1<<cfi->chipshift) - ofs;
1423                 else
1424                         thislen = len;
1425
1426                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1427                 if (ret)
1428                         break;
1429
1430                 *retlen += thislen;
1431                 len -= thislen;
1432                 buf += thislen;
1433
1434                 ofs = 0;
1435                 chipnum++;
1436         }
1437         return ret;
1438 }
1439
1440 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1441                                      unsigned long adr, map_word datum, int mode)
1442 {
1443         struct cfi_private *cfi = map->fldrv_priv;
1444         map_word status, write_cmd;
1445         int ret=0;
1446
1447         adr += chip->start;
1448
1449         switch (mode) {
1450         case FL_WRITING:
1451                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1452                 break;
1453         case FL_OTP_WRITE:
1454                 write_cmd = CMD(0xc0);
1455                 break;
1456         default:
1457                 return -EINVAL;
1458         }
1459
1460         spin_lock(chip->mutex);
1461         ret = get_chip(map, chip, adr, mode);
1462         if (ret) {
1463                 spin_unlock(chip->mutex);
1464                 return ret;
1465         }
1466
1467         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1468         ENABLE_VPP(map);
1469         xip_disable(map, chip, adr);
1470         map_write(map, write_cmd, adr);
1471         map_write(map, datum, adr);
1472         chip->state = mode;
1473
1474         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1475                                    adr, map_bankwidth(map),
1476                                    chip->word_write_time,
1477                                    chip->word_write_time_max);
1478         if (ret) {
1479                 xip_enable(map, chip, adr);
1480                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1481                 goto out;
1482         }
1483
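        /*
         * Status register layout on these chips: SR.7 = WSM ready, SR.5 =
         * erase error, SR.4 = program error, SR.3 = VPP low, SR.1 = block
         * locked.  The 0x1a mask below therefore catches program, VPP and
         * lock-protection failures in a single read.
         */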
1484         /* check for errors */
1485         status = map_read(map, adr);
1486         if (map_word_bitsset(map, status, CMD(0x1a))) {
1487                 unsigned long chipstatus = MERGESTATUS(status);
1488
1489                 /* reset status */
1490                 map_write(map, CMD(0x50), adr);
1491                 map_write(map, CMD(0x70), adr);
1492                 xip_enable(map, chip, adr);
1493
1494                 if (chipstatus & 0x02) {
1495                         ret = -EROFS;
1496                 } else if (chipstatus & 0x08) {
1497                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1498                         ret = -EIO;
1499                 } else {
1500                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1501                         ret = -EINVAL;
1502                 }
1503
1504                 goto out;
1505         }
1506
1507         xip_enable(map, chip, adr);
1508  out:   put_chip(map, chip, adr);
1509         spin_unlock(chip->mutex);
1510         return ret;
1511 }
1512
1513
1514 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1515 {
1516         struct map_info *map = mtd->priv;
1517         struct cfi_private *cfi = map->fldrv_priv;
1518         int ret = 0;
1519         int chipnum;
1520         unsigned long ofs;
1521
1522         *retlen = 0;
1523         if (!len)
1524                 return 0;
1525
1526         chipnum = to >> cfi->chipshift;
1527         ofs = to - (chipnum << cfi->chipshift);
1528
1529         /* If it's not bus-aligned, do the first byte write */
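        /*
         * Worked example (assuming a 4-byte bankwidth): a 10-byte write to
         * ofs 0x102 gives bus_ofs == 0x100, gap == 2 and n == 2, so bytes
         * 0-1 of the first word are padded with 0xff and bytes 2-3 carry
         * data; the remaining 8 bytes then go through the aligned loop
         * below.
         */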
1530         if (ofs & (map_bankwidth(map)-1)) {
1531                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1532                 int gap = ofs - bus_ofs;
1533                 int n;
1534                 map_word datum;
1535
1536                 n = min_t(int, len, map_bankwidth(map)-gap);
1537                 datum = map_word_ff(map);
1538                 datum = map_word_load_partial(map, datum, buf, gap, n);
1539
1540                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1541                                                bus_ofs, datum, FL_WRITING);
1542                 if (ret)
1543                         return ret;
1544
1545                 len -= n;
1546                 ofs += n;
1547                 buf += n;
1548                 (*retlen) += n;
1549
1550                 if (ofs >> cfi->chipshift) {
1551                         chipnum++;
1552                         ofs = 0;
1553                         if (chipnum == cfi->numchips)
1554                                 return 0;
1555                 }
1556         }
1557
1558         while (len >= map_bankwidth(map)) {
1559                 map_word datum = map_word_load(map, buf);
1560
1561                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1562                                        ofs, datum, FL_WRITING);
1563                 if (ret)
1564                         return ret;
1565
1566                 ofs += map_bankwidth(map);
1567                 buf += map_bankwidth(map);
1568                 (*retlen) += map_bankwidth(map);
1569                 len -= map_bankwidth(map);
1570
1571                 if (ofs >> cfi->chipshift) {
1572                         chipnum++;
1573                         ofs = 0;
1574                         if (chipnum == cfi->numchips)
1575                                 return 0;
1576                 }
1577         }
1578
1579         if (len & (map_bankwidth(map)-1)) {
1580                 map_word datum;
1581
1582                 datum = map_word_ff(map);
1583                 datum = map_word_load_partial(map, datum, buf, 0, len);
1584
1585                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1586                                        ofs, datum, FL_WRITING);
1587                 if (ret)
1588                         return ret;
1589
1590                 (*retlen) += len;
1591         }
1592
1593         return 0;
1594 }
1595
1596
1597 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1598                                     unsigned long adr, const struct kvec **pvec,
1599                                     unsigned long *pvec_seek, int len)
1600 {
1601         struct cfi_private *cfi = map->fldrv_priv;
1602         map_word status, write_cmd, datum;
1603         unsigned long cmd_adr;
1604         int ret, wbufsize, word_gap, words;
1605         const struct kvec *vec;
1606         unsigned long vec_seek;
1607         unsigned long initial_adr;
1608         int initial_len = len;
1609
1610         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1611         adr += chip->start;
1612         initial_adr = adr;
1613         cmd_adr = adr & ~(wbufsize-1);
1614
1615         /* Determine the write-to-buffer command once, according to the interleave */
1616         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1617
1618         spin_lock(chip->mutex);
1619         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1620         if (ret) {
1621                 spin_unlock(chip->mutex);
1622                 return ret;
1623         }
1624
1625         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1626         ENABLE_VPP(map);
1627         xip_disable(map, chip, cmd_adr);
1628
1629         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1630            [...], the device will not accept any more Write to Buffer commands".
1631            So we must check here and reset those bits if they're set. Otherwise
1632            we're just pissing in the wind */
1633         if (chip->state != FL_STATUS) {
1634                 map_write(map, CMD(0x70), cmd_adr);
1635                 chip->state = FL_STATUS;
1636         }
1637         status = map_read(map, cmd_adr);
1638         if (map_word_bitsset(map, status, CMD(0x30))) {
1639                 xip_enable(map, chip, cmd_adr);
1640                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1641                 xip_disable(map, chip, cmd_adr);
1642                 map_write(map, CMD(0x50), cmd_adr);
1643                 map_write(map, CMD(0x70), cmd_adr);
1644         }
1645
1646         chip->state = FL_WRITING_TO_BUFFER;
1647         map_write(map, write_cmd, cmd_adr);
1648         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1649         if (ret) {
1650                 /* Argh. Not ready for write to buffer */
1651                 map_word Xstatus = map_read(map, cmd_adr);
1652                 map_write(map, CMD(0x70), cmd_adr);
1653                 chip->state = FL_STATUS;
1654                 status = map_read(map, cmd_adr);
1655                 map_write(map, CMD(0x50), cmd_adr);
1656                 map_write(map, CMD(0x70), cmd_adr);
1657                 xip_enable(map, chip, cmd_adr);
1658                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1659                                 map->name, Xstatus.x[0], status.x[0]);
1660                 goto out;
1661         }
1662
1663         /* Figure out the number of words to write */
1664         word_gap = (-adr & (map_bankwidth(map)-1));
1665         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1666         if (!word_gap) {
1667                 words--;
1668         } else {
1669                 word_gap = map_bankwidth(map) - word_gap;
1670                 adr -= word_gap;
1671                 datum = map_word_ff(map);
1672         }
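        /*
         * Worked example (assuming a 2-byte bankwidth): len == 5 at the
         * unaligned address 0x103 gives word_gap == 1, adr is pulled back
         * to 0x102 with the pad byte preloaded as 0xff, and three words
         * are programmed while words == 2: in both branches "words" ends
         * up as the word count minus one, which is what CMD(words) below
         * must carry.
         */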
1673
1674         /* Write length of data to come */
1675         map_write(map, CMD(words), cmd_adr);
1676
1677         /* Write data */
1678         vec = *pvec;
1679         vec_seek = *pvec_seek;
1680         do {
1681                 int n = map_bankwidth(map) - word_gap;
1682                 if (n > vec->iov_len - vec_seek)
1683                         n = vec->iov_len - vec_seek;
1684                 if (n > len)
1685                         n = len;
1686
1687                 if (!word_gap && len < map_bankwidth(map))
1688                         datum = map_word_ff(map);
1689
1690                 datum = map_word_load_partial(map, datum,
1691                                               vec->iov_base + vec_seek,
1692                                               word_gap, n);
1693
1694                 len -= n;
1695                 word_gap += n;
1696                 if (!len || word_gap == map_bankwidth(map)) {
1697                         map_write(map, datum, adr);
1698                         adr += map_bankwidth(map);
1699                         word_gap = 0;
1700                 }
1701
1702                 vec_seek += n;
1703                 if (vec_seek == vec->iov_len) {
1704                         vec++;
1705                         vec_seek = 0;
1706                 }
1707         } while (len);
1708         *pvec = vec;
1709         *pvec_seek = vec_seek;
1710
1711         /* GO GO GO */
1712         map_write(map, CMD(0xd0), cmd_adr);
1713         chip->state = FL_WRITING;
1714
1715         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1716                                    initial_adr, initial_len,
1717                                    chip->buffer_write_time,
1718                                    chip->buffer_write_time_max);
1719         if (ret) {
1720                 map_write(map, CMD(0x70), cmd_adr);
1721                 chip->state = FL_STATUS;
1722                 xip_enable(map, chip, cmd_adr);
1723                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1724                 goto out;
1725         }
1726
1727         /* check for errors */
1728         status = map_read(map, cmd_adr);
1729         if (map_word_bitsset(map, status, CMD(0x1a))) {
1730                 unsigned long chipstatus = MERGESTATUS(status);
1731
1732                 /* reset status */
1733                 map_write(map, CMD(0x50), cmd_adr);
1734                 map_write(map, CMD(0x70), cmd_adr);
1735                 xip_enable(map, chip, cmd_adr);
1736
1737                 if (chipstatus & 0x02) {
1738                         ret = -EROFS;
1739                 } else if (chipstatus & 0x08) {
1740                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1741                         ret = -EIO;
1742                 } else {
1743                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1744                         ret = -EINVAL;
1745                 }
1746
1747                 goto out;
1748         }
1749
1750         xip_enable(map, chip, cmd_adr);
1751  out:   put_chip(map, chip, cmd_adr);
1752         spin_unlock(chip->mutex);
1753         return ret;
1754 }
1755
1756 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1757                                 unsigned long count, loff_t to, size_t *retlen)
1758 {
1759         struct map_info *map = mtd->priv;
1760         struct cfi_private *cfi = map->fldrv_priv;
1761         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1762         int ret = 0;
1763         int chipnum;
1764         unsigned long ofs, vec_seek, i;
1765         size_t len = 0;
1766
1767         for (i = 0; i < count; i++)
1768                 len += vecs[i].iov_len;
1769
1770         *retlen = 0;
1771         if (!len)
1772                 return 0;
1773
1774         chipnum = to >> cfi->chipshift;
1775         ofs = to - (chipnum << cfi->chipshift);
1776         vec_seek = 0;
1777
1778         do {
1779                 /* We must not cross write block boundaries */
1780                 int size = wbufsize - (ofs & (wbufsize-1));
1781
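                /*
                 * e.g. with a 32-byte write buffer, ofs == 0x1c gives
                 * size == 4, so this chunk stops exactly at the 0x20
                 * boundary.
                 */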
1782                 if (size > len)
1783                         size = len;
1784                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1785                                       ofs, &vecs, &vec_seek, size);
1786                 if (ret)
1787                         return ret;
1788
1789                 ofs += size;
1790                 (*retlen) += size;
1791                 len -= size;
1792
1793                 if (ofs >> cfi->chipshift) {
1794                         chipnum++;
1795                         ofs = 0;
1796                         if (chipnum == cfi->numchips)
1797                                 return 0;
1798                 }
1799
1800                 /* Be nice and reschedule with the chip in a usable state for other
1801                    processes. */
1802                 cond_resched();
1803
1804         } while (len);
1805
1806         return 0;
1807 }
1808
1809 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1810                                        size_t len, size_t *retlen, const u_char *buf)
1811 {
1812         struct kvec vec;
1813
1814         vec.iov_base = (void *) buf;
1815         vec.iov_len = len;
1816
1817         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1818 }
1819
1820 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1821                                       unsigned long adr, int len, void *thunk)
1822 {
1823         struct cfi_private *cfi = map->fldrv_priv;
1824         map_word status;
1825         int retries = 3;
1826         int ret;
1827
1828         adr += chip->start;
1829
1830  retry:
1831         spin_lock(chip->mutex);
1832         ret = get_chip(map, chip, adr, FL_ERASING);
1833         if (ret) {
1834                 spin_unlock(chip->mutex);
1835                 return ret;
1836         }
1837
1838         XIP_INVAL_CACHED_RANGE(map, adr, len);
1839         ENABLE_VPP(map);
1840         xip_disable(map, chip, adr);
1841
1842         /* Clear the status register first */
1843         map_write(map, CMD(0x50), adr);
1844
1845         /* Now erase */
1846         map_write(map, CMD(0x20), adr);
1847         map_write(map, CMD(0xD0), adr);
1848         chip->state = FL_ERASING;
1849         chip->erase_suspended = 0;
1850
1851         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1852                                    adr, len,
1853                                    chip->erase_time,
1854                                    chip->erase_time_max);
1855         if (ret) {
1856                 map_write(map, CMD(0x70), adr);
1857                 chip->state = FL_STATUS;
1858                 xip_enable(map, chip, adr);
1859                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1860                 goto out;
1861         }
1862
1863         /* We've broken this before. It doesn't hurt to be safe */
1864         map_write(map, CMD(0x70), adr);
1865         chip->state = FL_STATUS;
1866         status = map_read(map, adr);
1867
1868         /* check for errors */
1869         if (map_word_bitsset(map, status, CMD(0x3a))) {
1870                 unsigned long chipstatus = MERGESTATUS(status);
1871
1872                 /* Reset the error bits */
1873                 map_write(map, CMD(0x50), adr);
1874                 map_write(map, CMD(0x70), adr);
1875                 xip_enable(map, chip, adr);
1876
1877                 if ((chipstatus & 0x30) == 0x30) {
1878                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1879                         ret = -EINVAL;
1880                 } else if (chipstatus & 0x02) {
1881                         /* Protection bit set */
1882                         ret = -EROFS;
1883                 } else if (chipstatus & 0x8) {
1884                         /* Voltage */
1885                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1886                         ret = -EIO;
1887                 } else if (chipstatus & 0x20 && retries--) {
1888                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1889                         put_chip(map, chip, adr);
1890                         spin_unlock(chip->mutex);
1891                         goto retry;
1892                 } else {
1893                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1894                         ret = -EIO;
1895                 }
1896
1897                 goto out;
1898         }
1899
1900         xip_enable(map, chip, adr);
1901  out:   put_chip(map, chip, adr);
1902         spin_unlock(chip->mutex);
1903         return ret;
1904 }
1905
1906 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1907 {
1908         unsigned long ofs, len;
1909         int ret;
1910
1911         ofs = instr->addr;
1912         len = instr->len;
1913
1914         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1915         if (ret)
1916                 return ret;
1917
1918         instr->state = MTD_ERASE_DONE;
1919         mtd_erase_callback(instr);
1920
1921         return 0;
1922 }
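/*
 * Example (illustrative sketch only; ofs and ret are placeholders): a
 * caller drives a synchronous erase through the MTD API like this, and
 * the driver completes it via mtd_erase_callback() as seen above:
 *
 *	struct erase_info ei = { .mtd = mtd, .addr = ofs, .len = mtd->erasesize };
 *
 *	ret = mtd->erase(mtd, &ei);
 */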
1923
1924 static void cfi_intelext_sync (struct mtd_info *mtd)
1925 {
1926         struct map_info *map = mtd->priv;
1927         struct cfi_private *cfi = map->fldrv_priv;
1928         int i;
1929         struct flchip *chip;
1930         int ret = 0;
1931
1932         for (i=0; !ret && i<cfi->numchips; i++) {
1933                 chip = &cfi->chips[i];
1934
1935                 spin_lock(chip->mutex);
1936                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1937
1938                 if (!ret) {
1939                         chip->oldstate = chip->state;
1940                         chip->state = FL_SYNCING;
1941                         /* No need to wake_up() on this state change -
1942                          * as the whole point is that nobody can do anything
1943                          * with the chip now anyway.
1944                          */
1945                 }
1946                 spin_unlock(chip->mutex);
1947         }
1948
1949         /* Unlock the chips again */
1950
1951         for (i--; i >= 0; i--) {
1952                 chip = &cfi->chips[i];
1953
1954                 spin_lock(chip->mutex);
1955
1956                 if (chip->state == FL_SYNCING) {
1957                         chip->state = chip->oldstate;
1958                         chip->oldstate = FL_READY;
1959                         wake_up(&chip->wq);
1960                 }
1961                 spin_unlock(chip->mutex);
1962         }
1963 }
1964
1965 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
1966                                                 struct flchip *chip,
1967                                                 unsigned long adr,
1968                                                 int len, void *thunk)
1969 {
1970         struct cfi_private *cfi = map->fldrv_priv;
1971         int status, ofs_factor = cfi->interleave * cfi->device_type;
1972
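        /*
         * In read-identifier (0x90) mode the block lock status lives at
         * word offset 2 from the block base, hence the adr+(2*ofs_factor)
         * addressing, scaled for interleave and device width.
         */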
1973         adr += chip->start;
1974         xip_disable(map, chip, adr+(2*ofs_factor));
1975         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1976         chip->state = FL_JEDEC_QUERY;
1977         status = cfi_read_query(map, adr+(2*ofs_factor));
1978         xip_enable(map, chip, 0);
1979         return status;
1980 }
1981
1982 #ifdef DEBUG_LOCK_BITS
1983 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1984                                                 struct flchip *chip,
1985                                                 unsigned long adr,
1986                                                 int len, void *thunk)
1987 {
1988         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1989                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
1990         return 0;
1991 }
1992 #endif
1993
1994 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1995 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1996
1997 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1998                                        unsigned long adr, int len, void *thunk)
1999 {
2000         struct cfi_private *cfi = map->fldrv_priv;
2001         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2002         int udelay;
2003         int ret;
2004
2005         adr += chip->start;
2006
2007         spin_lock(chip->mutex);
2008         ret = get_chip(map, chip, adr, FL_LOCKING);
2009         if (ret) {
2010                 spin_unlock(chip->mutex);
2011                 return ret;
2012         }
2013
2014         ENABLE_VPP(map);
2015         xip_disable(map, chip, adr);
2016
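        /*
         * Locking is a two-cycle command sequence: 0x60 (lock setup)
         * followed by 0x01 to set the block lock bit or 0xd0 to clear
         * the lock bits.
         */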
2017         map_write(map, CMD(0x60), adr);
2018         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2019                 map_write(map, CMD(0x01), adr);
2020                 chip->state = FL_LOCKING;
2021         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2022                 map_write(map, CMD(0xD0), adr);
2023                 chip->state = FL_UNLOCKING;
2024         } else
2025                 BUG();
2026
2027         /*
2028          * If Instant Individual Block Locking is supported, there is
2029          * no need to delay.
2030          */
2031         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2032
2033         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2034         if (ret) {
2035                 map_write(map, CMD(0x70), adr);
2036                 chip->state = FL_STATUS;
2037                 xip_enable(map, chip, adr);
2038                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2039                 goto out;
2040         }
2041
2042         xip_enable(map, chip, adr);
2043 out:    put_chip(map, chip, adr);
2044         spin_unlock(chip->mutex);
2045         return ret;
2046 }
2047
2048 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
2049 {
2050         int ret;
2051
2052 #ifdef DEBUG_LOCK_BITS
2053         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2054                __func__, ofs, len);
2055         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2056                 ofs, len, NULL);
2057 #endif
2058
2059         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2060                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2061
2062 #ifdef DEBUG_LOCK_BITS
2063         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2064                __func__, ret);
2065         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2066                 ofs, len, NULL);
2067 #endif
2068
2069         return ret;
2070 }
2071
2072 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
2073 {
2074         int ret;
2075
2076 #ifdef DEBUG_LOCK_BITS
2077         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
2078                __func__, ofs, len);
2079         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2080                 ofs, len, NULL);
2081 #endif
2082
2083         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2084                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2085
2086 #ifdef DEBUG_LOCK_BITS
2087         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2088                __func__, ret);
2089         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2090                 ofs, len, NULL);
2091 #endif
2092
2093         return ret;
2094 }
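/*
 * Example (illustrative sketch only): parts that power up with all
 * blocks locked must be unlocked before programming or erasing, e.g.
 * one erase block at a time:
 *
 *	if (mtd->unlock)
 *		mtd->unlock(mtd, ofs, mtd->erasesize);
 */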
2095
2096 #ifdef CONFIG_MTD_OTP
2097
2098 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2099                         u_long data_offset, u_char *buf, u_int size,
2100                         u_long prot_offset, u_int groupno, u_int groupsize);
2101
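/*
 * All OTP operations below share the otp_op_t signature so that
 * cfi_intelext_otp_walk() can treat read, write and lock uniformly; a
 * NULL action instead makes the walker emit struct otp_info records
 * describing the protection regions.
 */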
2102 static int __xipram
2103 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2104             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2105 {
2106         struct cfi_private *cfi = map->fldrv_priv;
2107         int ret;
2108
2109         spin_lock(chip->mutex);
2110         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2111         if (ret) {
2112                 spin_unlock(chip->mutex);
2113                 return ret;
2114         }
2115
2116         /* let's ensure we're not reading back cached data from array mode */
2117         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2118
2119         xip_disable(map, chip, chip->start);
2120         if (chip->state != FL_JEDEC_QUERY) {
2121                 map_write(map, CMD(0x90), chip->start);
2122                 chip->state = FL_JEDEC_QUERY;
2123         }
2124         map_copy_from(map, buf, chip->start + offset, size);
2125         xip_enable(map, chip, chip->start);
2126
2127         /* then ensure we don't keep OTP data in the cache */
2128         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2129
2130         put_chip(map, chip, chip->start);
2131         spin_unlock(chip->mutex);
2132         return 0;
2133 }
2134
2135 static int
2136 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2137              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2138 {
2139         int ret;
2140
2141         while (size) {
2142                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2143                 int gap = offset - bus_ofs;
2144                 int n = min_t(int, size, map_bankwidth(map)-gap);
2145                 map_word datum = map_word_ff(map);
2146
2147                 datum = map_word_load_partial(map, datum, buf, gap, n);
2148                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2149                 if (ret)
2150                         return ret;
2151
2152                 offset += n;
2153                 buf += n;
2154                 size -= n;
2155         }
2156
2157         return 0;
2158 }
2159
2160 static int
2161 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2162             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2163 {
2164         struct cfi_private *cfi = map->fldrv_priv;
2165         map_word datum;
2166
2167         /* make sure area matches group boundaries */
2168         if (size != grpsz)
2169                 return -EXDEV;
2170
2171         datum = map_word_ff(map);
2172         datum = map_word_clr(map, datum, CMD(1 << grpno));
2173         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2174 }
2175
2176 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2177                                  size_t *retlen, u_char *buf,
2178                                  otp_op_t action, int user_regs)
2179 {
2180         struct map_info *map = mtd->priv;
2181         struct cfi_private *cfi = map->fldrv_priv;
2182         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2183         struct flchip *chip;
2184         struct cfi_intelext_otpinfo *otp;
2185         u_long devsize, reg_prot_offset, data_offset;
2186         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2187         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2188         int ret;
2189
2190         *retlen = 0;
2191
2192         /* Check that we actually have some OTP registers */
2193         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2194                 return -ENODATA;
2195
2196         /* we need real chips here, not virtual ones */
2197         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2198         chip_step = devsize >> cfi->chipshift;
2199         chip_num = 0;
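        /*
         * devsize is the size of one physical (interleaved) device, so
         * where partitioning has split each chip into several virtual
         * flchips, stepping by chip_step visits every real chip exactly
         * once.
         */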
2200
2201         /* Some chips have OTP located in the _top_ partition only.
2202            For example: Intel 28F256L18T (T means top-parameter device) */
2203         if (cfi->mfr == MANUFACTURER_INTEL) {
2204                 switch (cfi->id) {
2205                 case 0x880b:
2206                 case 0x880c:
2207                 case 0x880d:
2208                         chip_num = chip_step - 1;
2209                 }
2210         }
2211
2212         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2213                 chip = &cfi->chips[chip_num];
2214                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2215
2216                 /* first OTP region */
2217                 field = 0;
2218                 reg_prot_offset = extp->ProtRegAddr;
2219                 reg_fact_groups = 1;
2220                 reg_fact_size = 1 << extp->FactProtRegSize;
2221                 reg_user_groups = 1;
2222                 reg_user_size = 1 << extp->UserProtRegSize;
2223
2224                 while (len > 0) {
2225                         /* flash geometry fixup */
2226                         data_offset = reg_prot_offset + 1;
2227                         data_offset *= cfi->interleave * cfi->device_type;
2228                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2229                         reg_fact_size *= cfi->interleave;
2230                         reg_user_size *= cfi->interleave;
2231
2232                         if (user_regs) {
2233                                 groups = reg_user_groups;
2234                                 groupsize = reg_user_size;
2235                                 /* skip over factory reg area */
2236                                 groupno = reg_fact_groups;
2237                                 data_offset += reg_fact_groups * reg_fact_size;
2238                         } else {
2239                                 groups = reg_fact_groups;
2240                                 groupsize = reg_fact_size;
2241                                 groupno = 0;
2242                         }
2243
2244                         while (len > 0 && groups > 0) {
2245                                 if (!action) {
2246                                         /*
2247                                          * Special case: if action is NULL
2248                                          * we fill buf with otp_info records.
2249                                          */
2250                                         struct otp_info *otpinfo;
2251                                         map_word lockword;
2252                                         if (len <= sizeof(struct otp_info))
2253                                                 return -ENOSPC;
2254                                         len -= sizeof(struct otp_info);
2255                                         ret = do_otp_read(map, chip,
2256                                                           reg_prot_offset,
2257                                                           (u_char *)&lockword,
2258                                                           map_bankwidth(map),
2259                                                           0, 0, 0);
2260                                         if (ret)
2261                                                 return ret;
2262                                         otpinfo = (struct otp_info *)buf;
2263                                         otpinfo->start = from;
2264                                         otpinfo->length = groupsize;
2265                                         otpinfo->locked =
2266                                            !map_word_bitsset(map, lockword,
2267                                                              CMD(1 << groupno));
2268                                         from += groupsize;
2269                                         buf += sizeof(*otpinfo);
2270                                         *retlen += sizeof(*otpinfo);
2271                                 } else if (from >= groupsize) {
2272                                         from -= groupsize;
2273                                         data_offset += groupsize;
2274                                 } else {
2275                                         int size = groupsize;
2276                                         data_offset += from;
2277                                         size -= from;
2278                                         from = 0;
2279                                         if (size > len)
2280                                                 size = len;
2281                                         ret = action(map, chip, data_offset,
2282                                                      buf, size, reg_prot_offset,
2283                                                      groupno, groupsize);
2284                                         if (ret < 0)
2285                                                 return ret;
2286                                         buf += size;
2287                                         len -= size;
2288                                         *retlen += size;
2289                                         data_offset += size;
2290                                 }
2291                                 groupno++;
2292                                 groups--;
2293                         }
2294
2295                         /* next OTP region */
2296                         if (++field == extp->NumProtectionFields)
2297                                 break;
2298                         reg_prot_offset = otp->ProtRegAddr;
2299                         reg_fact_groups = otp->FactGroups;
2300                         reg_fact_size = 1 << otp->FactProtRegSize;
2301                         reg_user_groups = otp->UserGroups;
2302                         reg_user_size = 1 << otp->UserProtRegSize;
2303                         otp++;
2304                 }
2305         }
2306
2307         return 0;
2308 }
2309
2310 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2311                                            size_t len, size_t *retlen,
2312                                            u_char *buf)
2313 {
2314         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2315                                      buf, do_otp_read, 0);
2316 }
2317
2318 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2319                                            size_t len, size_t *retlen,
2320                                            u_char *buf)
2321 {
2322         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2323                                      buf, do_otp_read, 1);
2324 }
2325
2326 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2327                                             size_t len, size_t *retlen,
2328                                              u_char *buf)
2329                                             u_char *buf)
2330         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2331                                      buf, do_otp_write, 1);
2332 }
2333
2334 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2335                                            loff_t from, size_t len)
2336 {
2337         size_t retlen;
2338         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2339                                      NULL, do_otp_lock, 1);
2340 }
2341
2342 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2343                                            struct otp_info *buf, size_t len)
2344 {
2345         size_t retlen;
2346         int ret;
2347
2348         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2349         return ret ? : retlen;
2350 }
2351
2352 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2353                                            struct otp_info *buf, size_t len)
2354 {
2355         size_t retlen;
2356         int ret;
2357
2358         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2359         return ret ? : retlen;
2360 }
2361
2362 #endif
2363
2364 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2365 {
2366         struct mtd_erase_region_info *region;
2367         int block, status, i;
2368         unsigned long adr;
2369         size_t len;
2370
2371         for (i = 0; i < mtd->numeraseregions; i++) {
2372                 region = &mtd->eraseregions[i];
2373                 if (!region->lockmap)
2374                         continue;
2375
2376                 for (block = 0; block < region->numblocks; block++){
2377                         len = region->erasesize;
2378                         adr = region->offset + block * len;
2379
2380                         status = cfi_varsize_frob(mtd,
2381                                         do_getlockstatus_oneblock, adr, len, NULL);
2382                         if (status)
2383                                 set_bit(block, region->lockmap);
2384                         else
2385                                 clear_bit(block, region->lockmap);
2386                 }
2387         }
2388 }
2389
2390 static int cfi_intelext_suspend(struct mtd_info *mtd)
2391 {
2392         struct map_info *map = mtd->priv;
2393         struct cfi_private *cfi = map->fldrv_priv;
2394         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2395         int i;
2396         struct flchip *chip;
2397         int ret = 0;
2398
2399         if ((mtd->flags & MTD_POWERUP_LOCK)
2400             && extp && (extp->FeatureSupport & (1 << 5)))
2401                 cfi_intelext_save_locks(mtd);
2402
2403         for (i=0; !ret && i<cfi->numchips; i++) {
2404                 chip = &cfi->chips[i];
2405
2406                 spin_lock(chip->mutex);
2407
2408                 switch (chip->state) {
2409                 case FL_READY:
2410                 case FL_STATUS:
2411                 case FL_CFI_QUERY:
2412                 case FL_JEDEC_QUERY:
2413                         if (chip->oldstate == FL_READY) {
2414                                 /* place the chip in a known state before suspend */
2415                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2416                                 chip->oldstate = chip->state;
2417                                 chip->state = FL_PM_SUSPENDED;
2418                                 /* No need to wake_up() on this state change -
2419                                  * as the whole point is that nobody can do anything
2420                                  * with the chip now anyway.
2421                                  */
2422                         } else {
2423                                 /* There seems to be an operation pending. We must wait for it. */
2424                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2425                                 ret = -EAGAIN;
2426                         }
2427                         break;
2428                 default:
2429                         /* Should we actually wait? Once upon a time these routines weren't
2430                            allowed to. Or should we return -EAGAIN, because the upper layers
2431                            ought to have already shut down anything which was using the device
2432                            anyway? The latter for now. */
2433                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2434                         ret = -EAGAIN;  /* fall through to FL_PM_SUSPENDED */
2435                 case FL_PM_SUSPENDED:
2436                         break;
2437                 }
2438                 spin_unlock(chip->mutex);
2439         }
2440
2441         /* Unlock the chips again */
2442
2443         if (ret) {
2444                 for (i--; i >= 0; i--) {
2445                         chip = &cfi->chips[i];
2446
2447                         spin_lock(chip->mutex);
2448
2449                         if (chip->state == FL_PM_SUSPENDED) {
2450                                 /* No need to force it into a known state here,
2451                                    because we're returning failure, and it didn't
2452                                    get power cycled */
2453                                 chip->state = chip->oldstate;
2454                                 chip->oldstate = FL_READY;
2455                                 wake_up(&chip->wq);
2456                         }
2457                         spin_unlock(chip->mutex);
2458                 }
2459         }
2460
2461         return ret;
2462 }
2463
2464 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2465 {
2466         struct mtd_erase_region_info *region;
2467         int block, i;
2468         unsigned long adr;
2469         size_t len;
2470
2471         for (i = 0; i < mtd->numeraseregions; i++) {
2472                 region = &mtd->eraseregions[i];
2473                 if (!region->lockmap)
2474                         continue;
2475
2476                 for (block = 0; block < region->numblocks; block++) {
2477                         len = region->erasesize;
2478                         adr = region->offset + block * len;
2479
2480                         if (!test_bit(block, region->lockmap))
2481                                 cfi_intelext_unlock(mtd, adr, len);
2482                 }
2483         }
2484 }
2485
2486 static void cfi_intelext_resume(struct mtd_info *mtd)
2487 {
2488         struct map_info *map = mtd->priv;
2489         struct cfi_private *cfi = map->fldrv_priv;
2490         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2491         int i;
2492         struct flchip *chip;
2493
2494         for (i=0; i<cfi->numchips; i++) {
2495
2496                 chip = &cfi->chips[i];
2497
2498                 spin_lock(chip->mutex);
2499
2500                 /* Go to known state. Chip may have been power cycled */
2501                 if (chip->state == FL_PM_SUSPENDED) {
2502                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2503                         chip->oldstate = chip->state = FL_READY;
2504                         wake_up(&chip->wq);
2505                 }
2506
2507                 spin_unlock(chip->mutex);
2508         }
2509
2510         if ((mtd->flags & MTD_POWERUP_LOCK)
2511             && extp && (extp->FeatureSupport & (1 << 5)))
2512                 cfi_intelext_restore_locks(mtd);
2513 }
2514
2515 static int cfi_intelext_reset(struct mtd_info *mtd)
2516 {
2517         struct map_info *map = mtd->priv;
2518         struct cfi_private *cfi = map->fldrv_priv;
2519         int i, ret;
2520
2521         for (i=0; i < cfi->numchips; i++) {
2522                 struct flchip *chip = &cfi->chips[i];
2523
2524                 /* force the completion of any ongoing operation
2525                    and switch to array mode so any bootloader in
2526                    flash is accessible for soft reboot. */
2527                 spin_lock(chip->mutex);
2528                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2529                 if (!ret) {
2530                         map_write(map, CMD(0xff), chip->start);
2531                         chip->state = FL_SHUTDOWN;
2532                 }
2533                 spin_unlock(chip->mutex);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2540                                void *v)
2541 {
2542         struct mtd_info *mtd;
2543
2544         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2545         cfi_intelext_reset(mtd);
2546         return NOTIFY_DONE;
2547 }
2548
2549 static void cfi_intelext_destroy(struct mtd_info *mtd)
2550 {
2551         struct map_info *map = mtd->priv;
2552         struct cfi_private *cfi = map->fldrv_priv;
2553         struct mtd_erase_region_info *region;
2554         int i;
2555         cfi_intelext_reset(mtd);
2556         unregister_reboot_notifier(&mtd->reboot_notifier);
2557         kfree(cfi->cmdset_priv);
2558         kfree(cfi->cfiq);
2559         kfree(cfi->chips[0].priv);
2560         kfree(cfi);
2561         for (i = 0; i < mtd->numeraseregions; i++) {
2562                 region = &mtd->eraseregions[i];
2563                 kfree(region->lockmap);
2565         }
2566         kfree(mtd->eraseregions);
2567 }
2568
2569 MODULE_LICENSE("GPL");
2570 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2571 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2572 MODULE_ALIAS("cfi_cmdset_0003");
2573 MODULE_ALIAS("cfi_cmdset_0200");