[MTD] Avoid compile warnings for Intel CFI flash without OTP support.
drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.173 2005/03/30 23:57:30 tpoynor Exp $
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
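/*
 * The OTP entry points below exist only when CONFIG_MTD_OTP is enabled;
 * guarding the prototypes with the same #ifdef as their definitions avoids
 * "declared but not used" compile warnings on configurations without OTP
 * support, which is the point of this change (see the commit summary above).
 */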
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n" );
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely the device IDs are as
	 * well.  This table picks up all the cases where we
	 * know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
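
/*
 * For illustration (a sketch of what cfi_fixup() does with these tables,
 * not code from this file): each entry whose manufacturer/device IDs match
 * the probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards, has its
 * fixup invoked with the optional param, roughly:
 *
 *	for (f = table; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 */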

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

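/*
 * Layout parsed above for a version 1.3 extended query table (derived from
 * the code itself rather than quoted from the spec):
 *
 *	struct cfi_pri_intelext		fixed fields, incl. first otpinfo
 *	otpinfo * (NumProtectionFields - 1)
 *	6 bytes				burst read info
 *	1 byte				number of partition regions
 *	per region:			regioninfo (incl. first blockinfo)
 *					+ blockinfo * (NumBlockTypes - 1)
 *
 * The function retries with a bigger buffer ("goto again") until the whole
 * variable-size tail fits, capped at 4096 bytes.
 */
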
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
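	/*
	 * Worked example (numbers are illustrative only): a 16 MiB chip
	 * (cfi->chipshift == 24) reporting 4 identical hardware partitions
	 * gives partshift = 24 - __ffs(4) = 22, i.e. the chip is re-described
	 * below as four virtual 4 MiB chips sharing one flchip_shared lock.
	 */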
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

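/*
 * Quick legend of the Intel/Sharp opcodes issued in the functions below
 * (gathered from this file's own usage, not a complete datasheet list):
 *
 *	0x40/0xc0  word program / protection (OTP) program setup
 *	0x50       clear status register
 *	0x70       read status register
 *	0xb0       program/erase suspend
 *	0xd0       suspend resume
 *	0xff       return to read array mode
 *
 * CMD(0x80) below is not an opcode: it is the status-register "WSM ready"
 * bit replicated across the bus width for polling.
 */
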
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
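		/*
		 * Concrete scenario (illustrative): partition A is erasing
		 * and owns shared->writing; partition B now wants to write.
		 * B finds A as the "contender" below, recursively calls
		 * get_chip() on A in A's context, which suspends A's erase
		 * (or puts B to sleep if suspend is unsupported), and only
		 * then does B take over shared->writing.
		 */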
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to the chip we borrowed it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also, configuring MTD CFI
 * support for a single buswidth and a single interleave is recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

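/*
 * Canonical usage (a sketch of the pattern used throughout this file, not
 * a new API):
 *
 *	xip_disable(map, chip, adr);	// IRQs and preemption off
 *	map_write(map, CMD(0x40), adr);	// issue the command sequence
 *	...				// only __xipram code in between
 *	xip_enable(map, chip, adr);	// back to array mode first
 */
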
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls both for the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending, the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why the add_wait_queue()
 * and schedule() calls that appear inside a couple of xip_disable()'d code
 * paths, like in do_erase_oneblock for example, are harmless: the queueing
 * and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash is
 * in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
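
/*
 * Chip addressing math used by point/unpoint/read above, worked through
 * with illustrative numbers: with cfi->chipshift == 21 (2 MiB per chip),
 * from == 0x230000 gives chipnum = 0x230000 >> 21 = 1 and
 * ofs = 0x230000 - (1 << 21) = 0x30000, i.e. offset 0x30000 within the
 * second chip.  A request crossing a chip boundary is clipped to
 * (1 << chipshift) - ofs and continued on the next chip with ofs = 0.
 */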

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		if (chipnum >= cfi->numchips)
			break;
		chip = &cfi->chips[chipnum];

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
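
	/*
	 * Example of the head handling above (illustrative numbers): with a
	 * 4-byte bankwidth, writing 7 bytes at ofs 0x102 gives bus_ofs 0x100,
	 * gap 2 and n = min(7, 4 - 2) = 2; the two bytes are merged into a
	 * word preset to all-ones (programming 0xff never clears bits on NOR
	 * flash), so the neighbouring bytes in that word are left untouched.
	 */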

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);
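	/*
	 * E.g. (illustrative): two interleaved chips each advertising a
	 * 32-byte write buffer (MaxBufWriteSize == 5) give
	 * wbufsize = 2 << 5 = 64, and cmd_adr aligns the command address
	 * down to that 64-byte buffer boundary.
	 */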

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);
