aae94e4250f60b90c1eeec8e3a534df5ca28445a
[linux-2.6-block.git] / drivers / mtd / spi-nor / core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4  * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5  *
6  * Copyright (C) 2005, Intec Automation Inc.
7  * Copyright (C) 2014, Freescale Semiconductor, Inc.
8  */
9
10 #include <linux/err.h>
11 #include <linux/errno.h>
12 #include <linux/module.h>
13 #include <linux/device.h>
14 #include <linux/mutex.h>
15 #include <linux/math64.h>
16 #include <linux/sizes.h>
17 #include <linux/slab.h>
18
19 #include <linux/mtd/mtd.h>
20 #include <linux/of_platform.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/spi/flash.h>
23 #include <linux/mtd/spi-nor.h>
24
25 #include "core.h"
26
27 /* Define max times to check status register before we give up. */
28
29 /*
30  * For everything but full-chip erase; probably could be much smaller, but kept
31  * around for safety for now
32  */
33 #define DEFAULT_READY_WAIT_JIFFIES              (40UL * HZ)
34
35 /*
36  * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
37  * for larger flash
38  */
39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES       (40UL * HZ)
40
41 #define SPI_NOR_MAX_ADDR_WIDTH  4
42
43 #define JEDEC_MFR(info)        ((info)->id[0])
44
/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
        /* op->data.buf.in occupies the same memory as op->data.buf.out */
        if (object_is_on_stack(op->data.buf.in) ||
            !virt_addr_valid(op->data.buf.in)) {
                /*
                 * Stack or vmalloc'ed buffers are not DMA-able; redirect the
                 * transfer through the pre-allocated DMA-safe bounce buffer.
                 * Clamp the length to the bounce buffer size: callers handle
                 * the resulting partial transfer via the returned byte count.
                 */
                if (op->data.nbytes > nor->bouncebuf_size)
                        op->data.nbytes = nor->bouncebuf_size;
                op->data.buf.in = nor->bouncebuf;
                return true;
        }

        return false;
}
68
69 /**
70  * spi_nor_spimem_exec_op() - execute a memory operation
71  * @nor:        pointer to 'struct spi_nor'
72  * @op:         pointer to 'struct spi_mem_op' template for transfer
73  *
74  * Return: 0 on success, -error otherwise.
75  */
76 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
77 {
78         int error;
79
80         error = spi_mem_adjust_op_size(nor->spimem, op);
81         if (error)
82                 return error;
83
84         return spi_mem_exec_op(nor->spimem, op);
85 }
86
/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
                                        size_t len, u8 *buf)
{
        struct spi_mem_op op =
                SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
                           SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
                           SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
                           SPI_MEM_OP_DATA_IN(len, buf, 1));
        bool usebouncebuf;
        ssize_t nbytes;
        int error;

        /* get transfer protocols. */
        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
        /* Dummy cycles are clocked at the address-phase bus width. */
        op.dummy.buswidth = op.addr.buswidth;
        op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

        /* convert the dummy cycles to the number of bytes */
        op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;

        /* May shrink op.data.nbytes and retarget op.data.buf.in. */
        usebouncebuf = spi_nor_spimem_bounce(nor, &op);

        if (nor->dirmap.rdesc) {
                /* Direct-mapped read path; returns bytes read or -errno. */
                nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
                                             op.data.nbytes, op.data.buf.in);
        } else {
                error = spi_nor_spimem_exec_op(nor, &op);
                if (error)
                        return error;
                nbytes = op.data.nbytes;
        }

        /* Copy data back out of the bounce buffer into the caller's buffer. */
        if (usebouncebuf && nbytes > 0)
                memcpy(buf, op.data.buf.in, nbytes);

        return nbytes;
}
135
136 /**
137  * spi_nor_read_data() - read data from flash memory
138  * @nor:        pointer to 'struct spi_nor'
139  * @from:       offset to read from
140  * @len:        number of bytes to read
141  * @buf:        pointer to dst buffer
142  *
143  * Return: number of bytes read successfully, -errno otherwise
144  */
145 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
146 {
147         if (nor->spimem)
148                 return spi_nor_spimem_read_data(nor, from, len, buf);
149
150         return nor->controller_ops->read(nor, from, len, buf);
151 }
152
/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
                                         size_t len, const u8 *buf)
{
        struct spi_mem_op op =
                SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
                           SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
                           SPI_MEM_OP_NO_DUMMY,
                           SPI_MEM_OP_DATA_OUT(len, buf, 1));
        ssize_t nbytes;
        int error;

        op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
        op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
        op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

        /*
         * SST AAI word programming: after the first cycle the opcode carries
         * no address, the device auto-increments internally.
         */
        if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
                op.addr.nbytes = 0;

        /*
         * If a bounce buffer is required (non-DMA-able source), stage the
         * (possibly clamped) payload in it before issuing the write.
         */
        if (spi_nor_spimem_bounce(nor, &op))
                memcpy(nor->bouncebuf, buf, op.data.nbytes);

        if (nor->dirmap.wdesc) {
                /* Direct-mapped write path; returns bytes written or -errno. */
                nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
                                              op.data.nbytes, op.data.buf.out);
        } else {
                error = spi_nor_spimem_exec_op(nor, &op);
                if (error)
                        return error;
                nbytes = op.data.nbytes;
        }

        return nbytes;
}
196
197 /**
198  * spi_nor_write_data() - write data to flash memory
199  * @nor:        pointer to 'struct spi_nor'
200  * @to:         offset to write to
201  * @len:        number of bytes to write
202  * @buf:        pointer to src buffer
203  *
204  * Return: number of bytes written successfully, -errno otherwise
205  */
206 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
207                            const u8 *buf)
208 {
209         if (nor->spimem)
210                 return spi_nor_spimem_write_data(nor, to, len, buf);
211
212         return nor->controller_ops->write(nor, to, len, buf);
213 }
214
/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Must precede any program, erase or register-write command on the flash.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                /* Legacy controller path: opcode only, no payload. */
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
                                                     NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

        return ret;
}
243
/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Clears the write enable latch, protecting the flash from inadvertent
 * program/erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                /* Legacy controller path: opcode only, no payload. */
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
                                                     NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

        return ret;
}
272
/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:        pointer to 'struct spi_nor'.
 * @sr:         pointer to a DMA-able buffer where the value of the
 *              Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(1, sr, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
                                                    sr, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d reading SR\n", ret);

        return ret;
}
303
/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:        pointer to 'struct spi_nor'
 * @fsr:        pointer to a DMA-able buffer where the value of the
 *              Flag Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(1, fsr, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
                                                    fsr, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d reading FSR\n", ret);

        return ret;
}
334
/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:        pointer to 'struct spi_nor'
 * @cr:         pointer to a DMA-able buffer where the value of the
 *              Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(1, cr, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d reading CR\n", ret);

        return ret;
}
365
/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:        pointer to 'struct spi_nor'.
 * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
 *              address mode.
 *
 * Sends the plain EN4B/EX4B opcode; some manufacturers need extra steps
 * (write enable, EAR clearing) handled by the vendor-specific wrappers.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
                                                  SPINOR_OP_EN4B :
                                                  SPINOR_OP_EX4B,
                                                  1),
                                  SPI_MEM_OP_NO_ADDR,
                                  SPI_MEM_OP_NO_DUMMY,
                                  SPI_MEM_OP_NO_DATA);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor,
                                                     enable ? SPINOR_OP_EN4B :
                                                              SPINOR_OP_EX4B,
                                                     NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

        return ret;
}
401
402 /**
403  * st_micron_set_4byte_addr_mode() - Set 4-byte address mode for ST and Micron
404  * flashes.
405  * @nor:        pointer to 'struct spi_nor'.
406  * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
407  *              address mode.
408  *
409  * Return: 0 on success, -errno otherwise.
410  */
411 static int st_micron_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
412 {
413         int ret;
414
415         ret = spi_nor_write_enable(nor);
416         if (ret)
417                 return ret;
418
419         ret = spi_nor_set_4byte_addr_mode(nor, enable);
420         if (ret)
421                 return ret;
422
423         return spi_nor_write_disable(nor);
424 }
425
/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:        pointer to 'struct spi_nor'.
 * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
 *              address mode.
 *
 * Writes the Bank Address Register (BRWR); bit 7 selects 4-byte addressing.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
        int ret;

        /* Bit 7 of the Bank Address Register enables 4-byte addressing. */
        nor->bouncebuf[0] = enable << 7;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
                                                     nor->bouncebuf, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

        return ret;
}
459
/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:        pointer to 'struct spi_nor'.
 * @ear:        value to write to the Extended Address Register.
 *
 * The EAR supplies the high address byte for 3-byte-address commands on
 * flashes larger than 16 MiB.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
{
        int ret;

        /* Stage the value in the DMA-able bounce buffer. */
        nor->bouncebuf[0] = ear;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
                                                     nor->bouncebuf, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d writing EAR\n", ret);

        return ret;
}
491
/**
 * winbond_set_4byte_addr_mode() - Set 4-byte address mode for Winbond flashes.
 * @nor:        pointer to 'struct spi_nor'.
 * @enable:     true to enter the 4-byte address mode, false to exit the 4-byte
 *              address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
        int ret;

        ret = spi_nor_set_4byte_addr_mode(nor, enable);
        /* Nothing more to do when entering 4-byte mode, or on error. */
        if (ret || enable)
                return ret;

        /*
         * On Winbond W25Q256FV, leaving 4byte mode causes the Extended Address
         * Register to be set to 1, so all 3-byte-address reads come from the
         * second 16M. We must clear the register to enable normal behavior.
         */
        ret = spi_nor_write_enable(nor);
        if (ret)
                return ret;

        ret = spi_nor_write_ear(nor, 0);
        if (ret)
                return ret;

        return spi_nor_write_disable(nor);
}
523
/**
 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
 * @nor:        pointer to 'struct spi_nor'.
 * @sr:         pointer to a DMA-able buffer where the value of the
 *              Status Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(1, sr, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
                                                    sr, 1);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);

        return ret;
}
554
/**
 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
 * the flash is ready for new commands.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
        int ret;

        ret = spi_nor_xread_sr(nor, nor->bouncebuf);
        if (ret)
                return ret;

        /* Normalize the XSR_RDY bit to a 0/1 boolean. */
        return !!(nor->bouncebuf[0] & XSR_RDY);
}
572
/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Issues the Clear Status Register command to reset sticky error bits.
 * Errors are logged but not propagated: the caller is already on an error
 * path and has nothing useful to do with a secondary failure.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
                                                     NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}
597
/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
        int ret = spi_nor_read_sr(nor, nor->bouncebuf);

        if (ret)
                return ret;

        /*
         * On flashes with sticky program/erase error bits (SNOR_F_USE_CLSR),
         * report the failure and clear the bits so the flash can accept new
         * commands.
         */
        if (nor->flags & SNOR_F_USE_CLSR &&
            nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
                if (nor->bouncebuf[0] & SR_E_ERR)
                        dev_err(nor->dev, "Erase Error occurred\n");
                else
                        dev_err(nor->dev, "Programming Error occurred\n");

                spi_nor_clear_sr(nor);
                return -EIO;
        }

        /* Ready when the Write In Progress bit is clear. */
        return !(nor->bouncebuf[0] & SR_WIP);
}
625
/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Issues the Clear Flag Status Register command to reset sticky error bits.
 * Errors are logged but not propagated: the caller is already on an error
 * path and has nothing useful to do with a secondary failure.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
                                                     NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}
650
651 /**
652  * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
653  * ready for new commands.
654  * @nor:        pointer to 'struct spi_nor'.
655  *
656  * Return: 0 on success, -errno otherwise.
657  */
658 static int spi_nor_fsr_ready(struct spi_nor *nor)
659 {
660         int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
661
662         if (ret)
663                 return ret;
664
665         if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
666                 if (nor->bouncebuf[0] & FSR_E_ERR)
667                         dev_err(nor->dev, "Erase operation failed.\n");
668                 else
669                         dev_err(nor->dev, "Program operation failed.\n");
670
671                 if (nor->bouncebuf[0] & FSR_PT_ERR)
672                         dev_err(nor->dev,
673                         "Attempted to modify a protected sector.\n");
674
675                 spi_nor_clear_fsr(nor);
676                 return -EIO;
677         }
678
679         return nor->bouncebuf[0] & FSR_READY;
680 }
681
/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:        pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
        int sr, fsr;

        /* S3AN flashes use a dedicated status read opcode. */
        if (nor->flags & SNOR_F_READY_XSR_RDY)
                sr = spi_nor_xsr_ready(nor);
        else
                sr = spi_nor_sr_ready(nor);
        if (sr < 0)
                return sr;
        /* Consult the Flag Status Register only when the flash has one. */
        fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
        if (fsr < 0)
                return fsr;
        /* Ready only when both registers report ready. */
        return sr && fsr;
}
703
/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:                pointer to "struct spi_nor".
 * @timeout_jiffies:    jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
                                                unsigned long timeout_jiffies)
{
        unsigned long deadline;
        int timeout = 0, ret;

        deadline = jiffies + timeout_jiffies;

        while (!timeout) {
                /*
                 * Note the ordering: the deadline is checked before the poll,
                 * so the flash always gets one final readiness check after
                 * the deadline has passed.
                 */
                if (time_after_eq(jiffies, deadline))
                        timeout = 1;

                ret = spi_nor_ready(nor);
                if (ret < 0)
                        return ret;
                if (ret)
                        return 0;

                /* Busy-wait politely; yield the CPU between polls. */
                cond_resched();
        }

        dev_dbg(nor->dev, "flash operation timed out\n");

        return -ETIMEDOUT;
}
737
738 /**
739  * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
740  * flash to be ready, or timeout occurs.
741  * @nor:        pointer to "struct spi_nor".
742  *
743  * Return: 0 on success, -errno otherwise.
744  */
745 int spi_nor_wait_till_ready(struct spi_nor *nor)
746 {
747         return spi_nor_wait_till_ready_with_timeout(nor,
748                                                     DEFAULT_READY_WAIT_JIFFIES);
749 }
750
/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:        pointer to 'struct spi_nor'.
 * @sr:         pointer to DMA-able buffer to write to the Status Register.
 * @len:        number of bytes to write to the Status Register.
 *
 * Sends Write Enable, writes @len bytes with the WRSR command, then waits
 * for the internal write cycle to finish.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
        int ret;

        /* WRSR only takes effect with the write enable latch set. */
        ret = spi_nor_write_enable(nor);
        if (ret)
                return ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_OUT(len, sr, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
                                                     sr, len);
        }

        if (ret) {
                dev_dbg(nor->dev, "error %d writing SR\n", ret);
                return ret;
        }

        /* The register write triggers an internal cycle; wait it out. */
        return spi_nor_wait_till_ready(nor);
}
787
788 /**
789  * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
790  * ensure that the byte written match the received value.
791  * @nor:        pointer to a 'struct spi_nor'.
792  * @sr1:        byte value to be written to the Status Register.
793  *
794  * Return: 0 on success, -errno otherwise.
795  */
796 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
797 {
798         int ret;
799
800         nor->bouncebuf[0] = sr1;
801
802         ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
803         if (ret)
804                 return ret;
805
806         ret = spi_nor_read_sr(nor, nor->bouncebuf);
807         if (ret)
808                 return ret;
809
810         if (nor->bouncebuf[0] != sr1) {
811                 dev_dbg(nor->dev, "SR1: read back test failed\n");
812                 return -EIO;
813         }
814
815         return 0;
816 }
817
/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:        pointer to a 'struct spi_nor'.
 * @sr1:        byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
        int ret;
        /* bouncebuf[0] holds SR1, bouncebuf[1] holds SR2/CR. */
        u8 *sr_cr = nor->bouncebuf;
        u8 cr_written;

        /* Make sure we don't overwrite the contents of Status Register 2. */
        if (!(nor->flags & SNOR_F_NO_READ_CR)) {
                ret = spi_nor_read_cr(nor, &sr_cr[1]);
                if (ret)
                        return ret;
        } else if (nor->params.quad_enable) {
                /*
                 * If the Status Register 2 Read command (35h) is not
                 * supported, we should at least be sure we don't
                 * change the value of the SR2 Quad Enable bit.
                 *
                 * We can safely assume that when the Quad Enable method is
                 * set, the value of the QE bit is one, as a consequence of the
                 * nor->params.quad_enable() call.
                 *
                 * We can safely assume that the Quad Enable bit is present in
                 * the Status Register 2 at BIT(1). According to the JESD216
                 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
                 * Write Status (01h) command is available just for the cases
                 * in which the QE bit is described in SR2 at BIT(1).
                 */
                sr_cr[1] = SR2_QUAD_EN_BIT1;
        } else {
                /* CR unreadable and no quad enable in use: write zero. */
                sr_cr[1] = 0;
        }

        sr_cr[0] = sr1;

        ret = spi_nor_write_sr(nor, sr_cr, 2);
        if (ret)
                return ret;

        /* Without RDCR support there is no way to verify SR2; trust it. */
        if (nor->flags & SNOR_F_NO_READ_CR)
                return 0;

        cr_written = sr_cr[1];

        /* Read-back verification of the Configuration Register. */
        ret = spi_nor_read_cr(nor, &sr_cr[1]);
        if (ret)
                return ret;

        if (cr_written != sr_cr[1]) {
                dev_dbg(nor->dev, "CR: read back test failed\n");
                return -EIO;
        }

        return 0;
}
882
/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:        pointer to a 'struct spi_nor'.
 * @cr:         byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
        int ret;
        /* bouncebuf[0] holds SR1, bouncebuf[1] holds CR. */
        u8 *sr_cr = nor->bouncebuf;
        u8 sr_written;

        /* Keep the current value of the Status Register 1. */
        ret = spi_nor_read_sr(nor, sr_cr);
        if (ret)
                return ret;

        sr_cr[1] = cr;

        ret = spi_nor_write_sr(nor, sr_cr, 2);
        if (ret)
                return ret;

        sr_written = sr_cr[0];

        /* Verify SR1 was preserved by the 16-bit write. */
        ret = spi_nor_read_sr(nor, sr_cr);
        if (ret)
                return ret;

        if (sr_written != sr_cr[0]) {
                dev_dbg(nor->dev, "SR: Read back test failed\n");
                return -EIO;
        }

        /* Without RDCR support there is no way to verify CR; trust it. */
        if (nor->flags & SNOR_F_NO_READ_CR)
                return 0;

        /* Verify the Configuration Register took the requested value. */
        ret = spi_nor_read_cr(nor, &sr_cr[1]);
        if (ret)
                return ret;

        if (cr != sr_cr[1]) {
                dev_dbg(nor->dev, "CR: read back test failed\n");
                return -EIO;
        }

        return 0;
}
935
936 /**
937  * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
938  * the byte written match the received value without affecting other bits in the
939  * Status Register 1 and 2.
940  * @nor:        pointer to a 'struct spi_nor'.
941  * @sr1:        byte value to be written to the Status Register.
942  *
943  * Return: 0 on success, -errno otherwise.
944  */
945 static int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
946 {
947         if (nor->flags & SNOR_F_HAS_16BIT_SR)
948                 return spi_nor_write_16bit_sr_and_check(nor, sr1);
949
950         return spi_nor_write_sr1_and_check(nor, sr1);
951 }
952
953 /**
954  * spi_nor_write_sr2() - Write the Status Register 2 using the
955  * SPINOR_OP_WRSR2 (3eh) command.
956  * @nor:        pointer to 'struct spi_nor'.
957  * @sr2:        pointer to DMA-able buffer to write to the Status Register 2.
958  *
959  * Return: 0 on success, -errno otherwise.
960  */
961 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
962 {
963         int ret;
964
965         ret = spi_nor_write_enable(nor);
966         if (ret)
967                 return ret;
968
969         if (nor->spimem) {
970                 struct spi_mem_op op =
971                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
972                                    SPI_MEM_OP_NO_ADDR,
973                                    SPI_MEM_OP_NO_DUMMY,
974                                    SPI_MEM_OP_DATA_OUT(1, sr2, 1));
975
976                 ret = spi_mem_exec_op(nor->spimem, &op);
977         } else {
978                 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
979                                                      sr2, 1);
980         }
981
982         if (ret) {
983                 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
984                 return ret;
985         }
986
987         return spi_nor_wait_till_ready(nor);
988 }
989
990 /**
991  * spi_nor_read_sr2() - Read the Status Register 2 using the
992  * SPINOR_OP_RDSR2 (3fh) command.
993  * @nor:        pointer to 'struct spi_nor'.
994  * @sr2:        pointer to DMA-able buffer where the value of the
995  *              Status Register 2 will be written.
996  *
997  * Return: 0 on success, -errno otherwise.
998  */
999 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1000 {
1001         int ret;
1002
1003         if (nor->spimem) {
1004                 struct spi_mem_op op =
1005                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
1006                                    SPI_MEM_OP_NO_ADDR,
1007                                    SPI_MEM_OP_NO_DUMMY,
1008                                    SPI_MEM_OP_DATA_IN(1, sr2, 1));
1009
1010                 ret = spi_mem_exec_op(nor->spimem, &op);
1011         } else {
1012                 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
1013                                                     sr2, 1);
1014         }
1015
1016         if (ret)
1017                 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
1018
1019         return ret;
1020 }
1021
1022 /**
1023  * spi_nor_erase_chip() - Erase the entire flash memory.
1024  * @nor:        pointer to 'struct spi_nor'.
1025  *
1026  * Return: 0 on success, -errno otherwise.
1027  */
1028 static int spi_nor_erase_chip(struct spi_nor *nor)
1029 {
1030         int ret;
1031
1032         dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
1033
1034         if (nor->spimem) {
1035                 struct spi_mem_op op =
1036                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
1037                                    SPI_MEM_OP_NO_ADDR,
1038                                    SPI_MEM_OP_NO_DUMMY,
1039                                    SPI_MEM_OP_NO_DATA);
1040
1041                 ret = spi_mem_exec_op(nor->spimem, &op);
1042         } else {
1043                 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
1044                                                      NULL, 0);
1045         }
1046
1047         if (ret)
1048                 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
1049
1050         return ret;
1051 }
1052
1053 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
1054 {
1055         size_t i;
1056
1057         for (i = 0; i < size; i++)
1058                 if (table[i][0] == opcode)
1059                         return table[i][1];
1060
1061         /* No conversion found, keep input op code. */
1062         return opcode;
1063 }
1064
/* Map a 3-byte-address read opcode onto its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
        static const u8 spi_nor_3to4_read[][2] = {
                { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
                { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
                { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
                { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
                { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
                { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
                { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
                { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },

                /* Double Transfer Rate opcodes. */
                { SPINOR_OP_READ_1_1_1_DTR,     SPINOR_OP_READ_1_1_1_DTR_4B },
                { SPINOR_OP_READ_1_2_2_DTR,     SPINOR_OP_READ_1_2_2_DTR_4B },
                { SPINOR_OP_READ_1_4_4_DTR,     SPINOR_OP_READ_1_4_4_DTR_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
                                      ARRAY_SIZE(spi_nor_3to4_read));
}
1085
/* Map a 3-byte-address page program opcode onto its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
        static const u8 spi_nor_3to4_program[][2] = {
                { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
                { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
                { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
                { SPINOR_OP_PP_1_1_8,   SPINOR_OP_PP_1_1_8_4B },
                { SPINOR_OP_PP_1_8_8,   SPINOR_OP_PP_1_8_8_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
                                      ARRAY_SIZE(spi_nor_3to4_program));
}
1099
/* Map a 3-byte-address erase opcode onto its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
        static const u8 spi_nor_3to4_erase[][2] = {
                { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
                { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
                { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
                                      ARRAY_SIZE(spi_nor_3to4_erase));
}
1111
1112 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1113 {
1114         nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1115         nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1116         nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1117
1118         if (!spi_nor_has_uniform_erase(nor)) {
1119                 struct spi_nor_erase_map *map = &nor->params.erase_map;
1120                 struct spi_nor_erase_type *erase;
1121                 int i;
1122
1123                 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1124                         erase = &map->erase_type[i];
1125                         erase->opcode =
1126                                 spi_nor_convert_3to4_erase(erase->opcode);
1127                 }
1128         }
1129 }
1130
1131 int spi_nor_lock_and_prep(struct spi_nor *nor)
1132 {
1133         int ret = 0;
1134
1135         mutex_lock(&nor->lock);
1136
1137         if (nor->controller_ops &&  nor->controller_ops->prepare) {
1138                 ret = nor->controller_ops->prepare(nor);
1139                 if (ret) {
1140                         mutex_unlock(&nor->lock);
1141                         return ret;
1142                 }
1143         }
1144         return ret;
1145 }
1146
/*
 * Undo spi_nor_lock_and_prep(): let the controller release any resources it
 * grabbed in ->prepare(), then drop the bus lock.
 */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
        if (nor->controller_ops && nor->controller_ops->unprepare)
                nor->controller_ops->unprepare(nor);
        mutex_unlock(&nor->lock);
}
1153
1154 /*
1155  * This code converts an address to the Default Address Mode, that has non
1156  * power of two page sizes. We must support this mode because it is the default
1157  * mode supported by Xilinx tools, it can access the whole flash area and
1158  * changing over to the Power-of-two mode is irreversible and corrupts the
1159  * original data.
1160  * Addr can safely be unsigned int, the biggest S3AN device is smaller than
1161  * 4 MiB.
1162  */
1163 static u32 s3an_convert_addr(struct spi_nor *nor, u32 addr)
1164 {
1165         u32 offset, page;
1166
1167         offset = addr % nor->page_size;
1168         page = addr / nor->page_size;
1169         page <<= (nor->page_size > 512) ? 10 : 9;
1170
1171         return page | offset;
1172 }
1173
1174 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1175 {
1176         if (!nor->params.convert_addr)
1177                 return addr;
1178
1179         return nor->params.convert_addr(nor, addr);
1180 }
1181
1182 /*
1183  * Initiate the erasure of a single sector
1184  */
1185 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1186 {
1187         int i;
1188
1189         addr = spi_nor_convert_addr(nor, addr);
1190
1191         if (nor->spimem) {
1192                 struct spi_mem_op op =
1193                         SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
1194                                    SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
1195                                    SPI_MEM_OP_NO_DUMMY,
1196                                    SPI_MEM_OP_NO_DATA);
1197
1198                 return spi_mem_exec_op(nor->spimem, &op);
1199         } else if (nor->controller_ops->erase) {
1200                 return nor->controller_ops->erase(nor, addr);
1201         }
1202
1203         /*
1204          * Default implementation, if driver doesn't have a specialized HW
1205          * control
1206          */
1207         for (i = nor->addr_width - 1; i >= 0; i--) {
1208                 nor->bouncebuf[i] = addr & 0xff;
1209                 addr >>= 8;
1210         }
1211
1212         return nor->controller_ops->write_reg(nor, nor->erase_opcode,
1213                                               nor->bouncebuf, nor->addr_width);
1214 }
1215
1216 /**
1217  * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1218  * @erase:      pointer to a structure that describes a SPI NOR erase type
1219  * @dividend:   dividend value
1220  * @remainder:  pointer to u32 remainder (will be updated)
1221  *
1222  * Return: the result of the division
1223  */
1224 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1225                                      u64 dividend, u32 *remainder)
1226 {
1227         /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1228         *remainder = (u32)dividend & erase->size_mask;
1229         return dividend >> erase->size_shift;
1230 }
1231
1232 /**
1233  * spi_nor_find_best_erase_type() - find the best erase type for the given
1234  *                                  offset in the serial flash memory and the
1235  *                                  number of bytes to erase. The region in
1236  *                                  which the address fits is expected to be
1237  *                                  provided.
1238  * @map:        the erase map of the SPI NOR
1239  * @region:     pointer to a structure that describes a SPI NOR erase region
1240  * @addr:       offset in the serial flash memory
1241  * @len:        number of bytes to erase
1242  *
1243  * Return: a pointer to the best fitted erase type, NULL otherwise.
1244  */
1245 static const struct spi_nor_erase_type *
1246 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1247                              const struct spi_nor_erase_region *region,
1248                              u64 addr, u32 len)
1249 {
1250         const struct spi_nor_erase_type *erase;
1251         u32 rem;
1252         int i;
1253         u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1254
1255         /*
1256          * Erase types are ordered by size, with the smallest erase type at
1257          * index 0.
1258          */
1259         for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1260                 /* Does the erase region support the tested erase type? */
1261                 if (!(erase_mask & BIT(i)))
1262                         continue;
1263
1264                 erase = &map->erase_type[i];
1265
1266                 /* Don't erase more than what the user has asked for. */
1267                 if (erase->size > len)
1268                         continue;
1269
1270                 /* Alignment is not mandatory for overlaid regions */
1271                 if (region->offset & SNOR_OVERLAID_REGION)
1272                         return erase;
1273
1274                 spi_nor_div_by_erase_size(erase, addr, &rem);
1275                 if (rem)
1276                         continue;
1277                 else
1278                         return erase;
1279         }
1280
1281         return NULL;
1282 }
1283
1284 /**
1285  * spi_nor_region_next() - get the next spi nor region
1286  * @region:     pointer to a structure that describes a SPI NOR erase region
1287  *
1288  * Return: the next spi nor region or NULL if last region.
1289  */
1290 struct spi_nor_erase_region *
1291 spi_nor_region_next(struct spi_nor_erase_region *region)
1292 {
1293         if (spi_nor_region_is_last(region))
1294                 return NULL;
1295         region++;
1296         return region;
1297 }
1298
1299 /**
1300  * spi_nor_find_erase_region() - find the region of the serial flash memory in
1301  *                               which the offset fits
1302  * @map:        the erase map of the SPI NOR
1303  * @addr:       offset in the serial flash memory
1304  *
1305  * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1306  *         otherwise.
1307  */
1308 static struct spi_nor_erase_region *
1309 spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1310 {
1311         struct spi_nor_erase_region *region = map->regions;
1312         u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1313         u64 region_end = region_start + region->size;
1314
1315         while (addr < region_start || addr >= region_end) {
1316                 region = spi_nor_region_next(region);
1317                 if (!region)
1318                         return ERR_PTR(-EINVAL);
1319
1320                 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1321                 region_end = region_start + region->size;
1322         }
1323
1324         return region;
1325 }
1326
1327 /**
1328  * spi_nor_init_erase_cmd() - initialize an erase command
1329  * @region:     pointer to a structure that describes a SPI NOR erase region
1330  * @erase:      pointer to a structure that describes a SPI NOR erase type
1331  *
1332  * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1333  *         otherwise.
1334  */
1335 static struct spi_nor_erase_command *
1336 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1337                        const struct spi_nor_erase_type *erase)
1338 {
1339         struct spi_nor_erase_command *cmd;
1340
1341         cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1342         if (!cmd)
1343                 return ERR_PTR(-ENOMEM);
1344
1345         INIT_LIST_HEAD(&cmd->list);
1346         cmd->opcode = erase->opcode;
1347         cmd->count = 1;
1348
1349         if (region->offset & SNOR_OVERLAID_REGION)
1350                 cmd->size = region->size;
1351         else
1352                 cmd->size = erase->size;
1353
1354         return cmd;
1355 }
1356
1357 /**
1358  * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1359  * @erase_list: list of erase commands
1360  */
1361 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1362 {
1363         struct spi_nor_erase_command *cmd, *next;
1364
1365         list_for_each_entry_safe(cmd, next, erase_list, list) {
1366                 list_del(&cmd->list);
1367                 kfree(cmd);
1368         }
1369 }
1370
1371 /**
1372  * spi_nor_init_erase_cmd_list() - initialize erase command list
1373  * @nor:        pointer to a 'struct spi_nor'
1374  * @erase_list: list of erase commands to be executed once we validate that the
1375  *              erase can be performed
1376  * @addr:       offset in the serial flash memory
1377  * @len:        number of bytes to erase
1378  *
1379  * Builds the list of best fitted erase commands and verifies if the erase can
1380  * be performed.
1381  *
1382  * Return: 0 on success, -errno otherwise.
1383  */
1384 static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
1385                                        struct list_head *erase_list,
1386                                        u64 addr, u32 len)
1387 {
1388         const struct spi_nor_erase_map *map = &nor->params.erase_map;
1389         const struct spi_nor_erase_type *erase, *prev_erase = NULL;
1390         struct spi_nor_erase_region *region;
1391         struct spi_nor_erase_command *cmd = NULL;
1392         u64 region_end;
1393         int ret = -EINVAL;
1394
1395         region = spi_nor_find_erase_region(map, addr);
1396         if (IS_ERR(region))
1397                 return PTR_ERR(region);
1398
1399         region_end = spi_nor_region_end(region);
1400
1401         while (len) {
1402                 erase = spi_nor_find_best_erase_type(map, region, addr, len);
1403                 if (!erase)
1404                         goto destroy_erase_cmd_list;
1405
1406                 if (prev_erase != erase ||
1407                     region->offset & SNOR_OVERLAID_REGION) {
1408                         cmd = spi_nor_init_erase_cmd(region, erase);
1409                         if (IS_ERR(cmd)) {
1410                                 ret = PTR_ERR(cmd);
1411                                 goto destroy_erase_cmd_list;
1412                         }
1413
1414                         list_add_tail(&cmd->list, erase_list);
1415                 } else {
1416                         cmd->count++;
1417                 }
1418
1419                 addr += cmd->size;
1420                 len -= cmd->size;
1421
1422                 if (len && addr >= region_end) {
1423                         region = spi_nor_region_next(region);
1424                         if (!region)
1425                                 goto destroy_erase_cmd_list;
1426                         region_end = spi_nor_region_end(region);
1427                 }
1428
1429                 prev_erase = erase;
1430         }
1431
1432         return 0;
1433
1434 destroy_erase_cmd_list:
1435         spi_nor_destroy_erase_cmd_list(erase_list);
1436         return ret;
1437 }
1438
1439 /**
1440  * spi_nor_erase_multi_sectors() - perform a non-uniform erase
1441  * @nor:        pointer to a 'struct spi_nor'
1442  * @addr:       offset in the serial flash memory
1443  * @len:        number of bytes to erase
1444  *
1445  * Build a list of best fitted erase commands and execute it once we validate
1446  * that the erase can be performed.
1447  *
1448  * Return: 0 on success, -errno otherwise.
1449  */
1450 static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
1451 {
1452         LIST_HEAD(erase_list);
1453         struct spi_nor_erase_command *cmd, *next;
1454         int ret;
1455
1456         ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
1457         if (ret)
1458                 return ret;
1459
1460         list_for_each_entry_safe(cmd, next, &erase_list, list) {
1461                 nor->erase_opcode = cmd->opcode;
1462                 while (cmd->count) {
1463                         ret = spi_nor_write_enable(nor);
1464                         if (ret)
1465                                 goto destroy_erase_cmd_list;
1466
1467                         ret = spi_nor_erase_sector(nor, addr);
1468                         if (ret)
1469                                 goto destroy_erase_cmd_list;
1470
1471                         addr += cmd->size;
1472                         cmd->count--;
1473
1474                         ret = spi_nor_wait_till_ready(nor);
1475                         if (ret)
1476                                 goto destroy_erase_cmd_list;
1477                 }
1478                 list_del(&cmd->list);
1479                 kfree(cmd);
1480         }
1481
1482         return 0;
1483
1484 destroy_erase_cmd_list:
1485         spi_nor_destroy_erase_cmd_list(&erase_list);
1486         return ret;
1487 }
1488
1489 /*
1490  * Erase an address range on the nor chip.  The address range may extend
1491  * one or more erase sectors.  Return an error is there is a problem erasing.
1492  */
1493 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1494 {
1495         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1496         u32 addr, len;
1497         uint32_t rem;
1498         int ret;
1499
1500         dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1501                         (long long)instr->len);
1502
1503         if (spi_nor_has_uniform_erase(nor)) {
1504                 div_u64_rem(instr->len, mtd->erasesize, &rem);
1505                 if (rem)
1506                         return -EINVAL;
1507         }
1508
1509         addr = instr->addr;
1510         len = instr->len;
1511
1512         ret = spi_nor_lock_and_prep(nor);
1513         if (ret)
1514                 return ret;
1515
1516         /* whole-chip erase? */
1517         if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1518                 unsigned long timeout;
1519
1520                 ret = spi_nor_write_enable(nor);
1521                 if (ret)
1522                         goto erase_err;
1523
1524                 ret = spi_nor_erase_chip(nor);
1525                 if (ret)
1526                         goto erase_err;
1527
1528                 /*
1529                  * Scale the timeout linearly with the size of the flash, with
1530                  * a minimum calibrated to an old 2MB flash. We could try to
1531                  * pull these from CFI/SFDP, but these values should be good
1532                  * enough for now.
1533                  */
1534                 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1535                               CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1536                               (unsigned long)(mtd->size / SZ_2M));
1537                 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1538                 if (ret)
1539                         goto erase_err;
1540
1541         /* REVISIT in some cases we could speed up erasing large regions
1542          * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
1543          * to use "small sector erase", but that's not always optimal.
1544          */
1545
1546         /* "sector"-at-a-time erase */
1547         } else if (spi_nor_has_uniform_erase(nor)) {
1548                 while (len) {
1549                         ret = spi_nor_write_enable(nor);
1550                         if (ret)
1551                                 goto erase_err;
1552
1553                         ret = spi_nor_erase_sector(nor, addr);
1554                         if (ret)
1555                                 goto erase_err;
1556
1557                         addr += mtd->erasesize;
1558                         len -= mtd->erasesize;
1559
1560                         ret = spi_nor_wait_till_ready(nor);
1561                         if (ret)
1562                                 goto erase_err;
1563                 }
1564
1565         /* erase multiple sectors */
1566         } else {
1567                 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1568                 if (ret)
1569                         goto erase_err;
1570         }
1571
1572         ret = spi_nor_write_disable(nor);
1573
1574 erase_err:
1575         spi_nor_unlock_and_unprep(nor);
1576
1577         return ret;
1578 }
1579
1580 static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
1581                                         uint64_t *len)
1582 {
1583         struct mtd_info *mtd = &nor->mtd;
1584         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1585         u8 tb_mask = SR_TB_BIT5;
1586         int pow;
1587
1588         if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1589                 tb_mask = SR_TB_BIT6;
1590
1591         if (!(sr & mask)) {
1592                 /* No protection */
1593                 *ofs = 0;
1594                 *len = 0;
1595         } else {
1596                 pow = ((sr & mask) ^ mask) >> SR_BP_SHIFT;
1597                 *len = mtd->size >> pow;
1598                 if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
1599                         *ofs = 0;
1600                 else
1601                         *ofs = mtd->size - *len;
1602         }
1603 }
1604
1605 /*
1606  * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1607  * @locked is false); 0 otherwise
1608  */
1609 static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
1610                                         uint64_t len, u8 sr, bool locked)
1611 {
1612         loff_t lock_offs;
1613         uint64_t lock_len;
1614
1615         if (!len)
1616                 return 1;
1617
1618         spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
1619
1620         if (locked)
1621                 /* Requested range is a sub-range of locked range */
1622                 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1623         else
1624                 /* Requested range does not overlap with locked range */
1625                 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1626 }
1627
/* Return 1 if the whole [ofs, ofs + len) range is locked, 0 otherwise. */
static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
                                u8 sr)
{
        return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
1633
/* Return 1 if the whole [ofs, ofs + len) range is unlocked, 0 otherwise. */
static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
                                  u8 sr)
{
        return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
1639
1640 /*
1641  * Lock a region of the flash. Compatible with ST Micro and similar flash.
1642  * Supports the block protection bits BP{0,1,2} in the status register
1643  * (SR). Does not support these features found in newer SR bitfields:
1644  *   - SEC: sector/block protect - only handle SEC=0 (block protect)
1645  *   - CMP: complement protect - only support CMP=0 (range is not complemented)
1646  *
1647  * Support for the following is provided conditionally for some flash:
1648  *   - TB: top/bottom protect
1649  *
1650  * Sample table portion for 8MB flash (Winbond w25q64fw):
1651  *
1652  *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
1653  *  --------------------------------------------------------------------------
1654  *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
1655  *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
1656  *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
1657  *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
1658  *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
1659  *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
1660  *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
1661  *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
1662  *  ------|-------|-------|-------|-------|---------------|-------------------
1663  *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
1664  *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
1665  *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
1666  *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
1667  *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
1668  *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
1669  *
1670  * Returns negative on errors, 0 on success.
1671  */
1672 static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1673 {
1674         struct mtd_info *mtd = &nor->mtd;
1675         int ret, status_old, status_new;
1676         u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1677         u8 tb_mask = SR_TB_BIT5;
1678         u8 pow, val;
1679         loff_t lock_len;
1680         bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
1681         bool use_top;
1682
1683         ret = spi_nor_read_sr(nor, nor->bouncebuf);
1684         if (ret)
1685                 return ret;
1686
1687         status_old = nor->bouncebuf[0];
1688
1689         /* If nothing in our range is unlocked, we don't need to do anything */
1690         if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
1691                 return 0;
1692
1693         /* If anything below us is unlocked, we can't use 'bottom' protection */
1694         if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
1695                 can_be_bottom = false;
1696
1697         /* If anything above us is unlocked, we can't use 'top' protection */
1698         if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
1699                                   status_old))
1700                 can_be_top = false;
1701
1702         if (!can_be_bottom && !can_be_top)
1703                 return -EINVAL;
1704
1705         /* Prefer top, if both are valid */
1706         use_top = can_be_top;
1707
1708         /* lock_len: length of region that should end up locked */
1709         if (use_top)
1710                 lock_len = mtd->size - ofs;
1711         else
1712                 lock_len = ofs + len;
1713
1714         if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1715                 tb_mask = SR_TB_BIT6;
1716
1717         /*
1718          * Need smallest pow such that:
1719          *
1720          *   1 / (2^pow) <= (len / size)
1721          *
1722          * so (assuming power-of-2 size) we do:
1723          *
1724          *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
1725          */
1726         pow = ilog2(mtd->size) - ilog2(lock_len);
1727         val = mask - (pow << SR_BP_SHIFT);
1728         if (val & ~mask)
1729                 return -EINVAL;
1730         /* Don't "lock" with no region! */
1731         if (!(val & mask))
1732                 return -EINVAL;
1733
1734         status_new = (status_old & ~mask & ~tb_mask) | val;
1735
1736         /* Disallow further writes if WP pin is asserted */
1737         status_new |= SR_SRWD;
1738
1739         if (!use_top)
1740                 status_new |= tb_mask;
1741
1742         /* Don't bother if they're the same */
1743         if (status_new == status_old)
1744                 return 0;
1745
1746         /* Only modify protection if it will not unlock other areas */
1747         if ((status_new & mask) < (status_old & mask))
1748                 return -EINVAL;
1749
1750         return spi_nor_write_sr_and_check(nor, status_new);
1751 }
1752
1753 /*
1754  * Unlock a region of the flash. See spi_nor_sr_lock() for more info
1755  *
1756  * Returns negative on errors, 0 on success.
1757  */
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	int ret, status_old, status_new;
	/* Block Protect bits in SR1 encode the size of the protected area. */
	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
	/* Top/Bottom select bit; moved to bit 6 below when the flash says so. */
	u8 tb_mask = SR_TB_BIT5;
	u8 pow, val;
	loff_t lock_len;
	/* 'bottom' protection is only available when the flash has a TB bit. */
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Current SR1 value; BP/TB are recomputed relative to this. */
	status_old = nor->bouncebuf[0];

	/* If nothing in our range is locked, we don't need to do anything */
	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				    status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
		tb_mask = SR_TB_BIT6;
	/*
	 * Need largest pow such that:
	 *
	 *   1 / (2^pow) >= (len / size)
	 *
	 * so (assuming power-of-2 size) we do:
	 *
	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
	 */
	pow = ilog2(mtd->size) - order_base_2(lock_len);
	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		val = mask - (pow << SR_BP_SHIFT);
		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	/* Keep every other SR bit; install the freshly computed BP field. */
	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1840
1841 /*
1842  * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
1843  * for more info.
1844  *
1845  * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1846  * negative on errors.
1847  */
1848 static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1849 {
1850         int ret;
1851
1852         ret = spi_nor_read_sr(nor, nor->bouncebuf);
1853         if (ret)
1854                 return ret;
1855
1856         return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
1857 }
1858
/* Status-Register-based protection backend, installed via params.locking_ops. */
static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
	.lock = spi_nor_sr_lock,
	.unlock = spi_nor_sr_unlock,
	.is_locked = spi_nor_sr_is_locked,
};
1864
1865 static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1866 {
1867         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1868         int ret;
1869
1870         ret = spi_nor_lock_and_prep(nor);
1871         if (ret)
1872                 return ret;
1873
1874         ret = nor->params.locking_ops->lock(nor, ofs, len);
1875
1876         spi_nor_unlock_and_unprep(nor);
1877         return ret;
1878 }
1879
1880 static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1881 {
1882         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1883         int ret;
1884
1885         ret = spi_nor_lock_and_prep(nor);
1886         if (ret)
1887                 return ret;
1888
1889         ret = nor->params.locking_ops->unlock(nor, ofs, len);
1890
1891         spi_nor_unlock_and_unprep(nor);
1892         return ret;
1893 }
1894
1895 static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1896 {
1897         struct spi_nor *nor = mtd_to_spi_nor(mtd);
1898         int ret;
1899
1900         ret = spi_nor_lock_and_prep(nor);
1901         if (ret)
1902                 return ret;
1903
1904         ret = nor->params.locking_ops->is_locked(nor, ofs, len);
1905
1906         spi_nor_unlock_and_unprep(nor);
1907         return ret;
1908 }
1909
1910 /**
1911  * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
1912  * Register 1.
1913  * @nor:        pointer to a 'struct spi_nor'
1914  *
1915  * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
1916  *
1917  * Return: 0 on success, -errno otherwise.
1918  */
1919 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
1920 {
1921         int ret;
1922
1923         ret = spi_nor_read_sr(nor, nor->bouncebuf);
1924         if (ret)
1925                 return ret;
1926
1927         if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
1928                 return 0;
1929
1930         nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
1931
1932         return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
1933 }
1934
1935 /**
1936  * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
1937  * Register 2.
1938  * @nor:       pointer to a 'struct spi_nor'.
1939  *
1940  * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
1941  *
1942  * Return: 0 on success, -errno otherwise.
1943  */
1944 int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
1945 {
1946         int ret;
1947
1948         if (nor->flags & SNOR_F_NO_READ_CR)
1949                 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
1950
1951         ret = spi_nor_read_cr(nor, nor->bouncebuf);
1952         if (ret)
1953                 return ret;
1954
1955         if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
1956                 return 0;
1957
1958         nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
1959
1960         return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
1961 }
1962
1963 /**
1964  * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
1965  * @nor:        pointer to a 'struct spi_nor'
1966  *
1967  * Set the Quad Enable (QE) bit in the Status Register 2.
1968  *
1969  * This is one of the procedures to set the QE bit described in the SFDP
1970  * (JESD216 rev B) specification but no manufacturer using this procedure has
1971  * been identified yet, hence the name of the function.
1972  *
1973  * Return: 0 on success, -errno otherwise.
1974  */
1975 int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
1976 {
1977         u8 *sr2 = nor->bouncebuf;
1978         int ret;
1979         u8 sr2_written;
1980
1981         /* Check current Quad Enable bit value. */
1982         ret = spi_nor_read_sr2(nor, sr2);
1983         if (ret)
1984                 return ret;
1985         if (*sr2 & SR2_QUAD_EN_BIT7)
1986                 return 0;
1987
1988         /* Update the Quad Enable bit. */
1989         *sr2 |= SR2_QUAD_EN_BIT7;
1990
1991         ret = spi_nor_write_sr2(nor, sr2);
1992         if (ret)
1993                 return ret;
1994
1995         sr2_written = *sr2;
1996
1997         /* Read back and check it. */
1998         ret = spi_nor_read_sr2(nor, sr2);
1999         if (ret)
2000                 return ret;
2001
2002         if (*sr2 != sr2_written) {
2003                 dev_dbg(nor->dev, "SR2: Read back test failed\n");
2004                 return -EIO;
2005         }
2006
2007         return 0;
2008 }
2009
2010 static int
2011 is25lp256_post_bfpt_fixups(struct spi_nor *nor,
2012                            const struct sfdp_parameter_header *bfpt_header,
2013                            const struct sfdp_bfpt *bfpt,
2014                            struct spi_nor_flash_parameter *params)
2015 {
2016         /*
2017          * IS25LP256 supports 4B opcodes, but the BFPT advertises a
2018          * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
2019          * Overwrite the address width advertised by the BFPT.
2020          */
2021         if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
2022                 BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
2023                 nor->addr_width = 4;
2024
2025         return 0;
2026 }
2027
/* Hook the IS25LP256 address-width override into SFDP parsing. */
static struct spi_nor_fixups is25lp256_fixups = {
	.post_bfpt = is25lp256_post_bfpt_fixups,
};
2031
2032 static int
2033 mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
2034                             const struct sfdp_parameter_header *bfpt_header,
2035                             const struct sfdp_bfpt *bfpt,
2036                             struct spi_nor_flash_parameter *params)
2037 {
2038         /*
2039          * MX25L25635F supports 4B opcodes but MX25L25635E does not.
2040          * Unfortunately, Macronix has re-used the same JEDEC ID for both
2041          * variants which prevents us from defining a new entry in the parts
2042          * table.
2043          * We need a way to differentiate MX25L25635E and MX25L25635F, and it
2044          * seems that the F version advertises support for Fast Read 4-4-4 in
2045          * its BFPT table.
2046          */
2047         if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
2048                 nor->flags |= SNOR_F_4B_OPCODES;
2049
2050         return 0;
2051 }
2052
/* Hook the MX25L25635E/F variant detection into SFDP parsing. */
static struct spi_nor_fixups mx25l25635_fixups = {
	.post_bfpt = mx25l25635_post_bfpt_fixups,
};
2056
static void gd25q256_default_init(struct spi_nor *nor)
{
	/*
	 * Some manufacturers, such as GigaDevice, may use a different
	 * bit to set QE on different memories, so the MFR alone cannot
	 * determine the quad_enable method for this case; we need
	 * to set it in the default_init fixup hook.
	 */
	nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
}
2067
/* Install the GD25Q256 quad-enable override before SFDP parsing runs. */
static struct spi_nor_fixups gd25q256_fixups = {
	.default_init = gd25q256_default_init,
};
2071
2072 /* NOTE: double check command sets and memory organization when you add
 * more nor chips.  This current list focuses on newer chips, which
 * have been converging on command sets that include JEDEC ID.
2075  *
2076  * All newly added entries should describe *hardware* and should use SECT_4K
2077  * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
2078  * scenarios excluding small sectors there is config option that can be
2079  * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
2080  * For historical (and compatibility) reasons (before we got above config) some
2081  * old entries may be missing 4K flag.
2082  */
2083 static const struct flash_info spi_nor_ids[] = {
2084         /* Atmel -- some are (confusingly) marketed as "DataFlash" */
2085         { "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
2086         { "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
2087
2088         { "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
2089         { "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64, SECT_4K) },
2090         { "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
2091         { "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
2092
2093         { "at25sl321",  INFO(0x1f4216, 0, 64 * 1024, 64,
2094                              SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2095
2096         { "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
2097         { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
2098         { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
2099         { "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
2100
2101         { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },
2102
2103         /* EON -- en25xxx */
2104         { "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
2105         { "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
2106         { "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
2107         { "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
2108         { "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
2109         { "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16,
2110                         SECT_4K | SPI_NOR_DUAL_READ) },
2111         { "en25qh16",   INFO(0x1c7015, 0, 64 * 1024,   32,
2112                         SECT_4K | SPI_NOR_DUAL_READ) },
2113         { "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64, 0) },
2114         { "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128,
2115                         SECT_4K | SPI_NOR_DUAL_READ) },
2116         { "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
2117         { "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
2118         { "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
2119
2120         /* ESMT */
2121         { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
2122         { "f25l32qa", INFO(0x8c4116, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_HAS_LOCK) },
2123         { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) },
2124
2125         /* Everspin */
2126         { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2127         { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2128         { "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2129         { "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2130
2131         /* Fujitsu */
2132         { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },
2133
2134         /* GigaDevice */
2135         {
2136                 "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
2137                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2138                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2139         },
2140         {
2141                 "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
2142                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2143                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2144         },
2145         {
2146                 "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
2147                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2148                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2149         },
2150         {
2151                 "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
2152                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2153                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2154         },
2155         {
2156                 "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128,
2157                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2158                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2159         },
2160         {
2161                 "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256,
2162                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2163                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2164         },
2165         {
2166                 "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256,
2167                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2168                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2169         },
2170         {
2171                 "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512,
2172                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2173                         SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB |
2174                         SPI_NOR_TB_SR_BIT6)
2175                         .fixups = &gd25q256_fixups,
2176         },
2177
2178         /* Intel/Numonyx -- xxxs33b */
2179         { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
2180         { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
2181         { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },
2182
2183         /* ISSI */
2184         { "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },
2185         { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
2186                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2187         { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32,
2188                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2189         { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16,
2190                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2191         { "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64,
2192                         SECT_4K | SPI_NOR_DUAL_READ) },
2193         { "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128,
2194                         SECT_4K | SPI_NOR_DUAL_READ) },
2195         { "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
2196                         SECT_4K | SPI_NOR_DUAL_READ) },
2197         { "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
2198                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2199                         SPI_NOR_4B_OPCODES)
2200                         .fixups = &is25lp256_fixups },
2201         { "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
2202                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2203         { "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
2204                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2205         { "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
2206                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2207         { "is25wp256", INFO(0x9d7019, 0, 64 * 1024, 512,
2208                             SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2209                             SPI_NOR_4B_OPCODES)
2210                        .fixups = &is25lp256_fixups },
2211
2212         /* Macronix */
2213         { "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
2214         { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
2215         { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
2216         { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
2217         { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
2218         { "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
2219         { "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
2220         { "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
2221         { "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
2222         { "mx25u3235f",  INFO(0xc22536, 0, 64 * 1024,  64,
2223                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2224         { "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8, SECT_4K) },
2225         { "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16, SECT_4K) },
2226         { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
2227         { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
2228         { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
2229         { "mx25r3235f",  INFO(0xc22816, 0, 64 * 1024,  64,
2230                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2231         { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
2232                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2233         { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
2234                          SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
2235                          .fixups = &mx25l25635_fixups },
2236         { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
2237         { "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16,
2238                          SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2239         { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
2240         { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2241         { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2242         { "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2243         { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
2244
2245         /* Micron <--> ST Micro */
2246         { "n25q016a",    INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
2247         { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
2248         { "n25q032a",    INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
2249         { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
2250         { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
2251         { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K |
2252                               USE_FSR | SPI_NOR_QUAD_READ) },
2253         { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K |
2254                               USE_FSR | SPI_NOR_QUAD_READ) },
2255         { "mt25ql256a",  INFO6(0x20ba19, 0x104400, 64 * 1024,  512,
2256                                SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
2257                                SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2258         { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K |
2259                               USE_FSR | SPI_NOR_DUAL_READ |
2260                               SPI_NOR_QUAD_READ) },
2261         { "mt25qu256a",  INFO6(0x20bb19, 0x104400, 64 * 1024,  512,
2262                                SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
2263                                SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2264         { "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K |
2265                               USE_FSR | SPI_NOR_QUAD_READ) },
2266         { "mt25ql512a",  INFO6(0x20ba20, 0x104400, 64 * 1024, 1024,
2267                                SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
2268                                SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2269         { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
2270         { "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024,
2271                                SECT_4K | USE_FSR | SPI_NOR_DUAL_READ |
2272                                SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2273         { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K |
2274                               USE_FSR | SPI_NOR_QUAD_READ) },
2275         { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2276         { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2277         { "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096,
2278                               SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
2279                               NO_CHIP_ERASE) },
2280         { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
2281
2282         /* Micron */
2283         {
2284                 "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
2285                         SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
2286                         SPI_NOR_4B_OPCODES)
2287         },
2288         { "mt35xu02g",  INFO(0x2c5b1c, 0, 128 * 1024, 2048,
2289                              SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ |
2290                              SPI_NOR_4B_OPCODES) },
2291
2292         /* PMC */
2293         { "pm25lv512",   INFO(0,        0, 32 * 1024,    2, SECT_4K_PMC) },
2294         { "pm25lv010",   INFO(0,        0, 32 * 1024,    4, SECT_4K_PMC) },
2295         { "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64, SECT_4K) },
2296
2297         /* Spansion/Cypress -- single (large) sector size only, at least
2298          * for the chips listed here (without boot sectors).
2299          */
2300         { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2301         { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2302         { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64,
2303                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2304         { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256,
2305                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2306         { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
2307         { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2308         { "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256,
2309                         SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2310                         SPI_NOR_HAS_LOCK | USE_CLSR) },
2311         { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2312         { "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
2313         { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
2314         { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
2315         { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2316         { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
2317         { "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
2318         { "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
2319         { "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
2320         { "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
2321         { "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
2322         { "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2323         { "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2324         { "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2325         { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
2326         { "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2327         { "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
2328         { "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
2329         { "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },
2330         { "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
2331         { "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2332         { "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2333         { "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
2334
2335         /* SST -- large erase sizes are "overlays", "sectors" are 4K */
2336         { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2337         { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2338         { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
2339         { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
2340         { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
2341         { "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
2342         { "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
2343         { "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
2344         { "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
2345         { "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
2346         { "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
2347         { "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
2348         { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32, SECT_4K |
2349                               SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2350         { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32, SECT_4K |
2351                               SPI_NOR_DUAL_READ) },
2352         { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2353
2354         /* ST Microelectronics -- newer production may have feature updates */
2355         { "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
2356         { "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
2357         { "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
2358         { "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
2359         { "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
2360         { "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
2361         { "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
2362         { "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
2363         { "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
2364
2365         { "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
2366         { "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
2367         { "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
2368         { "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
2369         { "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
2370         { "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
2371         { "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
2372         { "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
2373         { "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },
2374
2375         { "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
2376         { "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
2377         { "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },
2378
2379         { "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
2380         { "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
2381         { "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
2382
2383         { "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
2384         { "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
2385         { "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
2386         { "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
2387         { "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
2388         { "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },
2389
2390         /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
2391         { "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
2392         { "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
2393         { "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
2394         { "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
2395         { "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
2396         { "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
2397         {
2398                 "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
2399                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2400                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2401         },
2402         { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
2403         {
2404                 "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32,
2405                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2406                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2407         },
2408         { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
2409         { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
2410         { "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
2411         { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
2412         {
2413                 "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
2414                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2415                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2416         },
2417         {
2418                 "w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
2419                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2420                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2421         },
2422         {
2423                 "w25q32jwm", INFO(0xef8016, 0, 64 * 1024,  64,
2424                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2425                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2426         },
2427         { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
2428         { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
2429         {
2430                 "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
2431                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2432                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2433         },
2434         {
2435                 "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
2436                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2437                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2438         },
2439         {
2440                 "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
2441                         SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2442                         SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2443         },
2444         { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
2445         { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
2446         { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
2447         { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512,
2448                           SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2449                           SPI_NOR_4B_OPCODES) },
2450         { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
2451                              SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2452         { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512,
2453                              SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2454         { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
2455                         SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
2456
2457         /* Catalyst / On Semiconductor -- non-JEDEC */
2458         { "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2459         { "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2460         { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2461         { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2462         { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
2463
2464         /* Xilinx S3AN Internal Flash */
2465         { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
2466         { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
2467         { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
2468         { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
2469         { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
2470
2471         /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
2472         { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2473         { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
2474         { },
2475 };
2476
2477 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2478 {
2479         u8 *id = nor->bouncebuf;
2480         unsigned int i;
2481         int ret;
2482
2483         if (nor->spimem) {
2484                 struct spi_mem_op op =
2485                         SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
2486                                    SPI_MEM_OP_NO_ADDR,
2487                                    SPI_MEM_OP_NO_DUMMY,
2488                                    SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
2489
2490                 ret = spi_mem_exec_op(nor->spimem, &op);
2491         } else {
2492                 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
2493                                                     SPI_NOR_MAX_ID_LEN);
2494         }
2495         if (ret) {
2496                 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
2497                 return ERR_PTR(ret);
2498         }
2499
2500         for (i = 0; i < ARRAY_SIZE(spi_nor_ids) - 1; i++) {
2501                 if (spi_nor_ids[i].id_len &&
2502                     !memcmp(spi_nor_ids[i].id, id, spi_nor_ids[i].id_len))
2503                         return &spi_nor_ids[i];
2504         }
2505         dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2506                 SPI_NOR_MAX_ID_LEN, id);
2507         return ERR_PTR(-ENODEV);
2508 }
2509
/*
 * spi_nor_read() - mtd read handler.
 * @mtd:	pointer to the mtd device
 * @from:	flash offset to read from
 * @len:	number of bytes to read
 * @retlen:	on return, incremented by the number of bytes actually read
 * @buf:	destination buffer
 *
 * A single spi_nor_read_data() call may transfer fewer bytes than
 * requested, so the read is looped until @len bytes are done.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		/*
		 * Translate to the device address space; a no-op unless a
		 * convert_addr hook is installed (e.g. by s3an_nor_setup()).
		 */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		/* Advance by however much was actually transferred. */
		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2548
/*
 * sst_write() - mtd write handler for SST flashes.
 * @mtd:	pointer to the mtd device
 * @to:		flash offset to write to
 * @len:	number of bytes to write
 * @retlen:	on return, incremented by the number of bytes actually written
 * @buf:	source buffer
 *
 * SST parts are programmed with the Byte Program (BP) command for single
 * bytes and the Auto Address Increment Word Program (AAI WP) command for
 * 2-byte runs. AAI transfers must start on an even address, so a leading
 * odd byte and a possible trailing byte are written with BP.
 *
 * Return: 0 on success, -errno otherwise. @retlen is updated even on
 * error, to account for partially written data.
 */
static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual = 0;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		goto out;

	nor->sst_write_second = false;

	/* Start write from odd address. */
	if (to % 2) {
		nor->program_opcode = SPINOR_OP_BP;

		/* write one byte. */
		ret = spi_nor_write_data(nor, to, 1, buf);
		if (ret < 0)
			goto out;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto out;

		to++;
		actual++;
	}

	/* Write out most of the data here. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;

		/* write two bytes. */
		ret = spi_nor_write_data(nor, to, 2, buf + actual);
		if (ret < 0)
			goto out;
		WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto out;
		to += 2;
		/*
		 * NOTE(review): flag consumed by the write path; presumably
		 * makes subsequent AAI cycles omit the address phase.
		 */
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	/* Per SST datasheets, Write Disable terminates the AAI sequence. */
	ret = spi_nor_write_disable(nor);
	if (ret)
		goto out;

	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		goto out;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		ret = spi_nor_write_enable(nor);
		if (ret)
			goto out;

		nor->program_opcode = SPINOR_OP_BP;
		ret = spi_nor_write_data(nor, to, 1, buf + actual);
		if (ret < 0)
			goto out;
		WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto out;

		actual += 1;

		ret = spi_nor_write_disable(nor);
	}
out:
	/* Report partial progress even on the error paths. */
	*retlen += actual;
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2634
/*
 * spi_nor_write() - mtd write handler.
 * @mtd:	pointer to the mtd device
 * @to:		flash offset to write to
 * @len:	number of bytes to write
 * @retlen:	on return, incremented by the number of bytes actually written
 * @buf:	source buffer
 *
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks, so the loop below splits the range such that no
 * single program operation crosses a page boundary.  The address range
 * may be any size provided it is within the physical boundaries.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 * Power of two numbers have only one bit set and we can use
		 * the instruction hweight32 to detect if we need to do a
		 * modulus (do_div()) or not.
		 */
		if (hweight32(nor->page_size) == 1) {
			page_offset = addr & (nor->page_size - 1);
		} else {
			uint64_t aux = addr;

			page_offset = do_div(aux, nor->page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t,
				    nor->page_size - page_offset, len - i);

		/* Translate to the device address space if a hook is set. */
		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_write_enable(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		/* The controller may have written fewer than page_remain bytes. */
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep(nor);
	return ret;
}
2698
2699 static int spi_nor_check(struct spi_nor *nor)
2700 {
2701         if (!nor->dev ||
2702             (!nor->spimem && !nor->controller_ops) ||
2703             (!nor->spimem && nor->controller_ops &&
2704             (!nor->controller_ops->read ||
2705              !nor->controller_ops->write ||
2706              !nor->controller_ops->read_reg ||
2707              !nor->controller_ops->write_reg))) {
2708                 pr_err("spi-nor: please fill all the necessary fields!\n");
2709                 return -EINVAL;
2710         }
2711
2712         if (nor->spimem && nor->controller_ops) {
2713                 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2714                 return -EINVAL;
2715         }
2716
2717         return 0;
2718 }
2719
/*
 * s3an_nor_setup() - setup hook for Xilinx S3AN in-FPGA flashes.
 * @nor:	pointer to a 'struct spi_nor'
 * @hwcaps:	unused; present to match the setup hook signature
 *
 * Reads the current addressing mode from the XRDSR register and configures
 * opcodes, page size and erase geometry accordingly.
 *
 * Return: 0 on success, -errno if reading XRDSR fails.
 */
static int s3an_nor_setup(struct spi_nor *nor,
			  const struct spi_nor_hwcaps *hwcaps)
{
	int ret;

	/* Read the XRDSR register into the bounce buffer. */
	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* S3AN parts use their own sector-erase/program/read opcodes. */
	nor->erase_opcode = SPINOR_OP_XSE;
	nor->program_opcode = SPINOR_OP_XPP;
	nor->read_opcode = SPINOR_OP_READ;
	nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;

	/*
	 * These flashes have a page size of 264 or 528 bytes (known as
	 * Default addressing mode). It can be changed to a more standard
	 * Power of two mode where the page size is 256/512. This comes
	 * with a price: there is 3% less of space, the data is corrupted
	 * and the page size cannot be changed back to default addressing
	 * mode.
	 *
	 * The current addressing mode can be read from the XRDSR register
	 * and should not be changed, because that is a destructive operation.
	 */
	if (nor->bouncebuf[0] & XSR_PAGESIZE) {
		/* Flash in Power of 2 mode */
		nor->page_size = (nor->page_size == 264) ? 256 : 512;
		nor->mtd.writebufsize = nor->page_size;
		nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
		nor->mtd.erasesize = 8 * nor->page_size;
	} else {
		/* Flash in Default addressing mode: install address translation. */
		nor->params.convert_addr = s3an_convert_addr;
		nor->mtd.erasesize = nor->info->sector_size;
	}

	return 0;
}
2759
2760 static void
2761 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2762                           u8 num_mode_clocks,
2763                           u8 num_wait_states,
2764                           u8 opcode,
2765                           enum spi_nor_protocol proto)
2766 {
2767         read->num_mode_clocks = num_mode_clocks;
2768         read->num_wait_states = num_wait_states;
2769         read->opcode = opcode;
2770         read->proto = proto;
2771 }
2772
2773 void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
2774                              enum spi_nor_protocol proto)
2775 {
2776         pp->opcode = opcode;
2777         pp->proto = proto;
2778 }
2779
2780 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2781 {
2782         size_t i;
2783
2784         for (i = 0; i < size; i++)
2785                 if (table[i][0] == (int)hwcaps)
2786                         return table[i][1];
2787
2788         return -EINVAL;
2789 }
2790
/**
 * spi_nor_hwcaps_read2cmd() - Convert a read hardware capability bit to
 *                             its index in the read commands array.
 * @hwcaps:	a single SNOR_HWCAPS_READ* capability bit
 *
 * Return: the matching SNOR_CMD_READ* index, -EINVAL if @hwcaps is not a
 * known read capability.
 */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2814
/**
 * spi_nor_hwcaps_pp2cmd() - Convert a page program hardware capability bit
 *                           to its index in the page program commands array.
 * @hwcaps:	a single SNOR_HWCAPS_PP* capability bit
 *
 * Return: the matching SNOR_CMD_PP* index, -EINVAL if @hwcaps is not a
 * known page program capability.
 */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2830
2831 /**
2832  * spi_nor_spimem_check_op - check if the operation is supported
2833  *                           by controller
2834  *@nor:        pointer to a 'struct spi_nor'
2835  *@op:         pointer to op template to be checked
2836  *
2837  * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2838  */
2839 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2840                                    struct spi_mem_op *op)
2841 {
2842         /*
2843          * First test with 4 address bytes. The opcode itself might
2844          * be a 3B addressing opcode but we don't care, because
2845          * SPI controller implementation should not check the opcode,
2846          * but just the sequence.
2847          */
2848         op->addr.nbytes = 4;
2849         if (!spi_mem_supports_op(nor->spimem, op)) {
2850                 if (nor->mtd.size > SZ_16M)
2851                         return -ENOTSUPP;
2852
2853                 /* If flash size <= 16MB, 3 address bytes are sufficient */
2854                 op->addr.nbytes = 3;
2855                 if (!spi_mem_supports_op(nor->spimem, op))
2856                         return -ENOTSUPP;
2857         }
2858
2859         return 0;
2860 }
2861
/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to read command template to be checked
 *
 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	/* Zero-length data template: only the command sequence is checked. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_DUMMY(0, 1),
					  SPI_MEM_OP_DATA_IN(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
	op.dummy.buswidth = op.addr.buswidth;
	/* Convert mode + wait-state clock cycles to bytes on the dummy bus. */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;

	return spi_nor_spimem_check_op(nor, &op);
}
2887
/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *                           by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@pp:          pointer to page program command template to be checked
 *
 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
				   const struct spi_nor_pp_command *pp)
{
	/* Zero-length data template: only the command sequence is checked. */
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(0, NULL, 1));

	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
	op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);

	return spi_nor_spimem_check_op(nor, &op);
}
2910
/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
	struct spi_nor_flash_parameter *params =  &nor->params;
	unsigned int cap;

	/* DTR modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_DTR;

	/* X-X-X modes are not supported yet, mask them all. */
	*hwcaps &= ~SNOR_HWCAPS_X_X_X;

	/*
	 * Walk every remaining capability bit and clear it when the
	 * controller cannot execute the corresponding read or page
	 * program operation template.
	 */
	for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
		int rdidx, ppidx;

		if (!(*hwcaps & BIT(cap)))
			continue;

		/* Bit may name a read capability, a PP capability, or neither. */
		rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
		if (rdidx >= 0 &&
		    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
			*hwcaps &= ~BIT(cap);

		ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
		if (ppidx < 0)
			continue;

		if (spi_nor_spimem_check_pp(nor,
					    &params->page_programs[ppidx]))
			*hwcaps &= ~BIT(cap);
	}
}
2950
/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:      pointer to a structure that describes a SPI NOR erase type
 * @size:       the size of the sector/block erased by the erase type
 * @opcode:     the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
			    u8 opcode)
{
	erase->size = size;
	erase->opcode = opcode;
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	erase->size_shift = ffs(erase->size) - 1;
	/* Low-order offset mask derived from the (power of 2) size. */
	erase->size_mask = (1 << erase->size_shift) - 1;
}
2966
2967 /**
2968  * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2969  * @map:                the erase map of the SPI NOR
2970  * @erase_mask:         bitmask encoding erase types that can erase the entire
2971  *                      flash memory
2972  * @flash_size:         the spi nor flash memory size
2973  */
2974 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2975                                     u8 erase_mask, u64 flash_size)
2976 {
2977         /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2978         map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2979                                      SNOR_LAST_REGION;
2980         map->uniform_region.size = flash_size;
2981         map->regions = &map->uniform_region;
2982         map->uniform_erase_type = erase_mask;
2983 }
2984
2985 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2986                              const struct sfdp_parameter_header *bfpt_header,
2987                              const struct sfdp_bfpt *bfpt,
2988                              struct spi_nor_flash_parameter *params)
2989 {
2990         if (nor->info->fixups && nor->info->fixups->post_bfpt)
2991                 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2992                                                     params);
2993
2994         return 0;
2995 }
2996
2997 static int spi_nor_select_read(struct spi_nor *nor,
2998                                u32 shared_hwcaps)
2999 {
3000         int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
3001         const struct spi_nor_read_command *read;
3002
3003         if (best_match < 0)
3004                 return -EINVAL;
3005
3006         cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
3007         if (cmd < 0)
3008                 return -EINVAL;
3009
3010         read = &nor->params.reads[cmd];
3011         nor->read_opcode = read->opcode;
3012         nor->read_proto = read->proto;
3013
3014         /*
3015          * In the spi-nor framework, we don't need to make the difference
3016          * between mode clock cycles and wait state clock cycles.
3017          * Indeed, the value of the mode clock cycles is used by a QSPI
3018          * flash memory to know whether it should enter or leave its 0-4-4
3019          * (Continuous Read / XIP) mode.
3020          * eXecution In Place is out of the scope of the mtd sub-system.
3021          * Hence we choose to merge both mode and wait state clock cycles
3022          * into the so called dummy clock cycles.
3023          */
3024         nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
3025         return 0;
3026 }
3027
3028 static int spi_nor_select_pp(struct spi_nor *nor,
3029                              u32 shared_hwcaps)
3030 {
3031         int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
3032         const struct spi_nor_pp_command *pp;
3033
3034         if (best_match < 0)
3035                 return -EINVAL;
3036
3037         cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
3038         if (cmd < 0)
3039                 return -EINVAL;
3040
3041         pp = &nor->params.page_programs[cmd];
3042         nor->program_opcode = pp->opcode;
3043         nor->write_proto = pp->proto;
3044         return 0;
3045 }
3046
/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:		the erase map of the SPI NOR
 * @wanted_size:	the erase type size to search for. Contains the value of
 *			info->sector_size or of the "small sector" size in case
 *			CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * others.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	/* Scan from the biggest erase type down to the smallest. */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}
3099
/**
 * spi_nor_select_erase() - select the Sector Erase settings
 * @nor:	pointer to a 'struct spi_nor'
 *
 * For a uniform erase map, pick the erase type matching the wanted sector
 * size and set nor->erase_opcode and mtd->erasesize accordingly. For a
 * non-uniform map, only mtd->erasesize is set (to the biggest supported
 * erase size).
 *
 * Return: 0 on success, -EINVAL if no usable erase type is found.
 */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params.erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has an uniform layout then used only one
	 * of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}
3147
3148 static int spi_nor_default_setup(struct spi_nor *nor,
3149                                  const struct spi_nor_hwcaps *hwcaps)
3150 {
3151         struct spi_nor_flash_parameter *params = &nor->params;
3152         u32 ignored_mask, shared_mask;
3153         int err;
3154
3155         /*
3156          * Keep only the hardware capabilities supported by both the SPI
3157          * controller and the SPI flash memory.
3158          */
3159         shared_mask = hwcaps->mask & params->hwcaps.mask;
3160
3161         if (nor->spimem) {
3162                 /*
3163                  * When called from spi_nor_probe(), all caps are set and we
3164                  * need to discard some of them based on what the SPI
3165                  * controller actually supports (using spi_mem_supports_op()).
3166                  */
3167                 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
3168         } else {
3169                 /*
3170                  * SPI n-n-n protocols are not supported when the SPI
3171                  * controller directly implements the spi_nor interface.
3172                  * Yet another reason to switch to spi-mem.
3173                  */
3174                 ignored_mask = SNOR_HWCAPS_X_X_X;
3175                 if (shared_mask & ignored_mask) {
3176                         dev_dbg(nor->dev,
3177                                 "SPI n-n-n protocols are not supported.\n");
3178                         shared_mask &= ~ignored_mask;
3179                 }
3180         }
3181
3182         /* Select the (Fast) Read command. */
3183         err = spi_nor_select_read(nor, shared_mask);
3184         if (err) {
3185                 dev_dbg(nor->dev,
3186                         "can't select read settings supported by both the SPI controller and memory.\n");
3187                 return err;
3188         }
3189
3190         /* Select the Page Program command. */
3191         err = spi_nor_select_pp(nor, shared_mask);
3192         if (err) {
3193                 dev_dbg(nor->dev,
3194                         "can't select write settings supported by both the SPI controller and memory.\n");
3195                 return err;
3196         }
3197
3198         /* Select the Sector Erase command. */
3199         err = spi_nor_select_erase(nor);
3200         if (err) {
3201                 dev_dbg(nor->dev,
3202                         "can't select erase settings supported by both the SPI controller and memory.\n");
3203                 return err;
3204         }
3205
3206         return 0;
3207 }
3208
/**
 * spi_nor_setup() - configure the opcodes, dummy cycles and protocols.
 * @nor:	pointer to a 'struct spi_nor'.
 * @hwcaps:	hardware capabilities supported by the SPI controller driver.
 *
 * Delegates to the ->setup() hook installed during parameter init
 * (spi_nor_default_setup() by default; flash fixups may replace it,
 * e.g. s3an_post_sfdp_fixups() installs s3an_nor_setup()).
 *
 * Return: 0 when no ->setup() hook is set, otherwise the hook's result.
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	if (!nor->params.setup)
		return 0;

	return nor->params.setup(nor, hwcaps);
}
3217
/* Atmel default init: flag locking support so the core unlocks at init. */
static void atmel_set_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;
}
3222
/* Intel default init: flag locking support so the core unlocks at init. */
static void intel_set_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;
}
3227
/* ISSI default init: Quad is enabled via Status Register 1 bit 6. */
static void issi_set_default_init(struct spi_nor *nor)
{
	nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
}
3232
/*
 * Macronix default init: Quad is enabled via Status Register 1 bit 6 and
 * the generic EN4B/EX4B opcodes switch the 4-byte address mode.
 */
static void macronix_set_default_init(struct spi_nor *nor)
{
	nor->params.quad_enable = spi_nor_sr1_bit6_quad_enable;
	nor->params.set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
}
3238
/* SST default init: flag locking support so the core unlocks at init. */
static void sst_set_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;
}
3243
/*
 * ST/Micron default init: locking supported, the Status Register is 8-bit
 * only (clear SNOR_F_HAS_16BIT_SR), no Quad Enable bit is required, and a
 * manufacturer-specific hook switches the 4-byte address mode.
 */
static void st_micron_set_default_init(struct spi_nor *nor)
{
	nor->flags |= SNOR_F_HAS_LOCK;
	nor->flags &= ~SNOR_F_HAS_16BIT_SR;
	nor->params.quad_enable = NULL;
	nor->params.set_4byte_addr_mode = st_micron_set_4byte_addr_mode;
}
3251
/* Winbond default init: manufacturer-specific 4-byte address mode hook. */
static void winbond_set_default_init(struct spi_nor *nor)
{
	nor->params.set_4byte_addr_mode = winbond_set_4byte_addr_mode;
}
3256
3257 /**
3258  * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
3259  * settings based on MFR register and ->default_init() hook.
3260  * @nor:        pointer to a 'struct spi-nor'.
3261  */
3262 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
3263 {
3264         /* Init flash parameters based on MFR */
3265         switch (JEDEC_MFR(nor->info)) {
3266         case SNOR_MFR_ATMEL:
3267                 atmel_set_default_init(nor);
3268                 break;
3269
3270         case SNOR_MFR_INTEL:
3271                 intel_set_default_init(nor);
3272                 break;
3273
3274         case SNOR_MFR_ISSI:
3275                 issi_set_default_init(nor);
3276                 break;
3277
3278         case SNOR_MFR_MACRONIX:
3279                 macronix_set_default_init(nor);
3280                 break;
3281
3282         case SNOR_MFR_ST:
3283         case SNOR_MFR_MICRON:
3284                 st_micron_set_default_init(nor);
3285                 break;
3286
3287         case SNOR_MFR_SST:
3288                 sst_set_default_init(nor);
3289                 break;
3290
3291         case SNOR_MFR_WINBOND:
3292                 winbond_set_default_init(nor);
3293                 break;
3294
3295         default:
3296                 break;
3297         }
3298
3299         if (nor->info->fixups && nor->info->fixups->default_init)
3300                 nor->info->fixups->default_init(nor);
3301 }
3302
3303 /**
3304  * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
3305  * based on JESD216 SFDP standard.
3306  * @nor:        pointer to a 'struct spi-nor'.
3307  *
3308  * The method has a roll-back mechanism: in case the SFDP parsing fails, the
3309  * legacy flash parameters and settings will be restored.
3310  */
3311 static void spi_nor_sfdp_init_params(struct spi_nor *nor)
3312 {
3313         struct spi_nor_flash_parameter sfdp_params;
3314
3315         memcpy(&sfdp_params, &nor->params, sizeof(sfdp_params));
3316
3317         if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
3318                 nor->addr_width = 0;
3319                 nor->flags &= ~SNOR_F_4B_OPCODES;
3320         } else {
3321                 memcpy(&nor->params, &sfdp_params, sizeof(nor->params));
3322         }
3323 }
3324
/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:        pointer to a 'struct spi-nor'.
 *
 * Fills nor->params with the legacy defaults derived from the flash_info
 * table entry: sizes, hardware capabilities, (Fast) Read / Page Program
 * settings and the uniform erase map. Later init stages (manufacturer
 * defaults, SFDP, post-SFDP fixups) may override these values.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = &nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize legacy flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/*
	 * (Fast) Read settings. Numeric args are (mode clocks, wait states,
	 * opcode, protocol) — presumably matching spi_nor_set_read_settings();
	 * confirm against its definition.
	 */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 * Note: SECT_4K_PMC takes precedence over SECT_4K (else-if below).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	/* The full sector size erase is always available. */
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}
3417
3418 static void spansion_post_sfdp_fixups(struct spi_nor *nor)
3419 {
3420         if (nor->params.size <= SZ_16M)
3421                 return;
3422
3423         nor->flags |= SNOR_F_4B_OPCODES;
3424         /* No small sector erase for 4-byte command set */
3425         nor->erase_opcode = SPINOR_OP_SE;
3426         nor->mtd.erasesize = nor->info->sector_size;
3427 }
3428
/* Xilinx S3AN post-SFDP: install the S3AN-specific ->setup() hook. */
static void s3an_post_sfdp_fixups(struct spi_nor *nor)
{
	nor->params.setup = s3an_nor_setup;
}
3433
3434 /**
3435  * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
3436  * after SFDP has been parsed (is also called for SPI NORs that do not
3437  * support RDSFDP).
3438  * @nor:        pointer to a 'struct spi_nor'
3439  *
3440  * Typically used to tweak various parameters that could not be extracted by
3441  * other means (i.e. when information provided by the SFDP/flash_info tables
3442  * are incomplete or wrong).
3443  */
3444 static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
3445 {
3446         switch (JEDEC_MFR(nor->info)) {
3447         case SNOR_MFR_SPANSION:
3448                 spansion_post_sfdp_fixups(nor);
3449                 break;
3450
3451         default:
3452                 break;
3453         }
3454
3455         if (nor->info->flags & SPI_S3AN)
3456                 s3an_post_sfdp_fixups(nor);
3457
3458         if (nor->info->fixups && nor->info->fixups->post_sfdp)
3459                 nor->info->fixups->post_sfdp(nor);
3460 }
3461
3462 /**
3463  * spi_nor_late_init_params() - Late initialization of default flash parameters.
3464  * @nor:        pointer to a 'struct spi_nor'
3465  *
3466  * Used to set default flash parameters and settings when the ->default_init()
3467  * hook or the SFDP parser let voids.
3468  */
3469 static void spi_nor_late_init_params(struct spi_nor *nor)
3470 {
3471         /*
3472          * NOR protection support. When locking_ops are not provided, we pick
3473          * the default ones.
3474          */
3475         if (nor->flags & SNOR_F_HAS_LOCK && !nor->params.locking_ops)
3476                 nor->params.locking_ops = &spi_nor_sr_locking_ops;
3477 }
3478
/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:        pointer to a 'struct spi-nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *              spi_nor_info_init_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
 *              spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
 *              spi_nor_sfdp_init_params()
 *
 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
 *    the flash parameters and settings immediately after parsing the Basic
 *    Flash Parameter Table.
 *
 * which can be overwritten by:
 * 4/ Post SFDP flash parameters initialization. Used to tweak various
 *    parameters that could not be extracted by other means (i.e. when
 *    information provided by the SFDP/flash_info tables are incomplete or
 *    wrong).
 *              spi_nor_post_sfdp_fixups()
 *
 * 5/ Late default flash parameters initialization, used when the
 * ->default_init() hook or the SFDP parser do not set specific params.
 *              spi_nor_late_init_params()
 */
static void spi_nor_init_params(struct spi_nor *nor)
{
	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/* SFDP is only attempted on Dual/Quad parts not opting out of it. */
	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);
}
3530
3531 /**
3532  * spi_nor_quad_enable() - enable Quad I/O if needed.
3533  * @nor:                pointer to a 'struct spi_nor'
3534  *
3535  * Return: 0 on success, -errno otherwise.
3536  */
3537 static int spi_nor_quad_enable(struct spi_nor *nor)
3538 {
3539         if (!nor->params.quad_enable)
3540                 return 0;
3541
3542         if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3543               spi_nor_get_protocol_width(nor->write_proto) == 4))
3544                 return 0;
3545
3546         return nor->params.quad_enable(nor);
3547 }
3548
3549 /**
3550  * spi_nor_unlock_all() - Unlocks the entire flash memory array.
3551  * @nor:        pointer to a 'struct spi_nor'.
3552  *
3553  * Some SPI NOR flashes are write protected by default after a power-on reset
3554  * cycle, in order to avoid inadvertent writes during power-up. Backward
3555  * compatibility imposes to unlock the entire flash memory array at power-up
3556  * by default.
3557  */
3558 static int spi_nor_unlock_all(struct spi_nor *nor)
3559 {
3560         if (nor->flags & SNOR_F_HAS_LOCK)
3561                 return spi_nor_unlock(&nor->mtd, 0, nor->params.size);
3562
3563         return 0;
3564 }
3565
3566 static int spi_nor_init(struct spi_nor *nor)
3567 {
3568         int err;
3569
3570         err = spi_nor_quad_enable(nor);
3571         if (err) {
3572                 dev_dbg(nor->dev, "quad mode not supported\n");
3573                 return err;
3574         }
3575
3576         err = spi_nor_unlock_all(nor);
3577         if (err) {
3578                 dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
3579                 return err;
3580         }
3581
3582         if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
3583                 /*
3584                  * If the RESET# pin isn't hooked up properly, or the system
3585                  * otherwise doesn't perform a reset command in the boot
3586                  * sequence, it's impossible to 100% protect against unexpected
3587                  * reboots (e.g., crashes). Warn the user (or hopefully, system
3588                  * designer) that this is bad.
3589                  */
3590                 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
3591                           "enabling reset hack; may not recover from unexpected reboots\n");
3592                 nor->params.set_4byte_addr_mode(nor, true);
3593         }
3594
3595         return 0;
3596 }
3597
3598 /* mtd resume handler */
3599 static void spi_nor_resume(struct mtd_info *mtd)
3600 {
3601         struct spi_nor *nor = mtd_to_spi_nor(mtd);
3602         struct device *dev = nor->dev;
3603         int ret;
3604
3605         /* re-initialize the nor chip */
3606         ret = spi_nor_init(nor);
3607         if (ret)
3608                 dev_err(dev, "resume() failed\n");
3609 }
3610
/**
 * spi_nor_restore() - restore the flash's addressing mode before handoff.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Exits 4-byte address mode on flashes that entered it through a mode
 * switch (no dedicated 4-byte opcodes) and whose reset line is known to be
 * broken (SNOR_F_BROKEN_RESET), so a subsequent 3-byte-addressing user
 * (e.g. a bootloader after reboot) can still read the flash.
 */
void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params.set_4byte_addr_mode(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
3619
3620 static const struct flash_info *spi_nor_match_id(const char *name)
3621 {
3622         const struct flash_info *id = spi_nor_ids;
3623
3624         while (id->name) {
3625                 if (!strcmp(name, id->name))
3626                         return id;
3627                 id++;
3628         }
3629         return NULL;
3630 }
3631
3632 static int spi_nor_set_addr_width(struct spi_nor *nor)
3633 {
3634         if (nor->addr_width) {
3635                 /* already configured from SFDP */
3636         } else if (nor->info->addr_width) {
3637                 nor->addr_width = nor->info->addr_width;
3638         } else if (nor->mtd.size > 0x1000000) {
3639                 /* enable 4-byte addressing if the device exceeds 16MiB */
3640                 nor->addr_width = 4;
3641         } else {
3642                 nor->addr_width = 3;
3643         }
3644
3645         if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3646                 dev_dbg(nor->dev, "address width is too large: %u\n",
3647                         nor->addr_width);
3648                 return -EINVAL;
3649         }
3650
3651         /* Set 4byte opcodes when possible. */
3652         if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3653             !(nor->flags & SNOR_F_HAS_4BAIT))
3654                 spi_nor_set_4byte_opcodes(nor);
3655
3656         return 0;
3657 }
3658
/* Expose the part name and JEDEC ID through the mtd debugfs fields. */
static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	/*
	 * partid is "spi-nor:" + hex ID bytes.
	 * NOTE(review): devm_kasprintf() may return NULL on OOM; partid is
	 * then simply left NULL.
	 */
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}
3668
/*
 * Resolve the flash_info entry for this device: try the caller-supplied
 * name first, fall back to JEDEC auto-detection, and when both a name match
 * and a JEDEC ID exist, let the JEDEC result override a mismatching name.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	/*
	 * NOTE(review): a spi_nor_read_id() error is folded into -ENOENT
	 * here, losing the original errno — consider propagating it.
	 */
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
3708
/**
 * spi_nor_scan() - detect and initialize an SPI NOR flash.
 * @nor:	pointer to a 'struct spi_nor', with nor->dev (and nor->spimem
 *		for spi-mem drivers) already set by the caller.
 * @name:	flash model name to look up, or NULL to auto-detect via JEDEC.
 * @hwcaps:	hardware capabilities supported by the SPI controller driver.
 *
 * Detects the flash, initializes its parameters (flash_info + SFDP), fills
 * in the mtd_info callbacks and geometry, selects opcodes/protocols and
 * finally sends the init commands to the device.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	struct spi_nor_flash_parameter *params = &nor->params;
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel spi-nor
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |=  SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	/* Init flash parameters based on flash_info struct and SFDP */
	spi_nor_init_params(nor);

	/* Fill in the mtd_info fields and operation callbacks. */
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;

	/* Locking callbacks only when locking_ops were selected. */
	if (nor->params.locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* sst nor chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	/* Translate flash_info flags into runtime SNOR_F_* flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3857
/*
 * Create a direct-mapping descriptor covering the whole flash for reads,
 * using the already-selected read opcode, address width, dummy cycles and
 * protocol. Returns 0 or the error from devm_spi_mem_dirmap_create().
 */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* get transfer protocols. */
	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
	op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
	/* dummy cycles are clocked at the address bus width */
	op->dummy.buswidth = op->addr.buswidth;
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}
3883
/*
 * Create a direct-mapping descriptor covering the whole flash for writes,
 * using the already-selected program opcode, address width and protocol.
 * Returns 0 or the error from devm_spi_mem_dirmap_create().
 */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* get transfer protocols. */
	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
	op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
	op->dummy.buswidth = op->addr.buswidth;
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	/* SST AAI continuation words are sent without an address phase. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}
3909
/*
 * spi-mem probe: allocate and initialize a struct spi_nor, identify the
 * flash, size the bounce buffer, create the dirmap descriptors and
 * register the device with the MTD core.
 */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	/* Device-managed allocation: freed automatically on detach. */
	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	/* Platform-data name takes precedence over the spi-mem one. */
	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	/* Identify the chip and fill in the nor fields used below. */
	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		/* Drop the too-small buffer set up earlier, get a bigger one. */
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	/* Dirmap descriptors must be created after spi_nor_scan(). */
	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	/* Expose the flash (and optional partitions) through the MTD core. */
	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}
3982
3983 static int spi_nor_remove(struct spi_mem *spimem)
3984 {
3985         struct spi_nor *nor = spi_mem_get_drvdata(spimem);
3986
3987         spi_nor_restore(nor);
3988
3989         /* Clean up MTD stuff. */
3990         return mtd_device_unregister(&nor->mtd);
3991 }
3992
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	/* Same recovery as remove(): leave the flash in a restored state. */
	spi_nor_restore(spi_mem_get_drvdata(spimem));
}
3999
4000 /*
4001  * Do NOT add to this array without reading the following:
4002  *
4003  * Historically, many flash devices are bound to this driver by their name. But
4004  * since most of these flash are compatible to some extent, and their
4005  * differences can often be differentiated by the JEDEC read-ID command, we
4006  * encourage new users to add support to the spi-nor library, and simply bind
4007  * against a generic string here (e.g., "jedec,spi-nor").
4008  *
4009  * Many flash names are kept here in this list (as well as in spi-nor.c) to
4010  * keep them available as module aliases for existing platforms.
4011  */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ /* sentinel */ },
};
4055 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
4056
/* Matched against the device tree "compatible" property. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
4065 MODULE_DEVICE_TABLE(of, spi_nor_of_table);
4066
4067 /*
4068  * REVISIT: many of these chips have deep power-down modes, which
4069  * should clearly be entered on suspend() to minimize power use.
4070  * And also when they're otherwise idle...
4071  */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		/* Legacy name-based (non-DT) binding, see spi_nor_dev_ids. */
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
4084 module_spi_mem_driver(spi_nor_driver);
4085
4086 MODULE_LICENSE("GPL v2");
4087 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
4088 MODULE_AUTHOR("Mike Lavender");
4089 MODULE_DESCRIPTION("framework for SPI NOR");