1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Core registration and callback routines for MTD
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
37 struct backing_dev_info *mtd_bdi;
39 #ifdef CONFIG_PM_SLEEP
41 static int mtd_cls_suspend(struct device *dev)
43 struct mtd_info *mtd = dev_get_drvdata(dev);
45 return mtd ? mtd_suspend(mtd) : 0;
48 static int mtd_cls_resume(struct device *dev)
50 struct mtd_info *mtd = dev_get_drvdata(dev);
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
60 #define MTD_CLS_PM_OPS NULL
63 static struct class mtd_class = {
69 static DEFINE_IDR(mtd_idr);
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72 should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
76 struct mtd_info *__mtd_next_device(int i)
78 return idr_get_next(&mtd_idr, &i);
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
82 static LIST_HEAD(mtd_notifiers);
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88 * the mtd_info will probably want to use the release() hook...
90 static void mtd_release(struct device *dev)
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
95 /* remove /dev/mtdXro node */
96 device_destroy(&mtd_class, index + 1);
99 #define MTD_DEVICE_ATTR_RO(name) \
100 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
102 #define MTD_DEVICE_ATTR_RW(name) \
103 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
105 static ssize_t mtd_type_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
108 struct mtd_info *mtd = dev_get_drvdata(dev);
133 case MTD_MLCNANDFLASH:
140 return sysfs_emit(buf, "%s\n", type);
142 MTD_DEVICE_ATTR_RO(type);
144 static ssize_t mtd_flags_show(struct device *dev,
145 struct device_attribute *attr, char *buf)
147 struct mtd_info *mtd = dev_get_drvdata(dev);
149 return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
151 MTD_DEVICE_ATTR_RO(flags);
153 static ssize_t mtd_size_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
156 struct mtd_info *mtd = dev_get_drvdata(dev);
158 return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
160 MTD_DEVICE_ATTR_RO(size);
162 static ssize_t mtd_erasesize_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
165 struct mtd_info *mtd = dev_get_drvdata(dev);
167 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
169 MTD_DEVICE_ATTR_RO(erasesize);
171 static ssize_t mtd_writesize_show(struct device *dev,
172 struct device_attribute *attr, char *buf)
174 struct mtd_info *mtd = dev_get_drvdata(dev);
176 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
178 MTD_DEVICE_ATTR_RO(writesize);
180 static ssize_t mtd_subpagesize_show(struct device *dev,
181 struct device_attribute *attr, char *buf)
183 struct mtd_info *mtd = dev_get_drvdata(dev);
184 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
186 return sysfs_emit(buf, "%u\n", subpagesize);
188 MTD_DEVICE_ATTR_RO(subpagesize);
190 static ssize_t mtd_oobsize_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
193 struct mtd_info *mtd = dev_get_drvdata(dev);
195 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
197 MTD_DEVICE_ATTR_RO(oobsize);
199 static ssize_t mtd_oobavail_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
202 struct mtd_info *mtd = dev_get_drvdata(dev);
204 return sysfs_emit(buf, "%u\n", mtd->oobavail);
206 MTD_DEVICE_ATTR_RO(oobavail);
208 static ssize_t mtd_numeraseregions_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
211 struct mtd_info *mtd = dev_get_drvdata(dev);
213 return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
215 MTD_DEVICE_ATTR_RO(numeraseregions);
217 static ssize_t mtd_name_show(struct device *dev,
218 struct device_attribute *attr, char *buf)
220 struct mtd_info *mtd = dev_get_drvdata(dev);
222 return sysfs_emit(buf, "%s\n", mtd->name);
224 MTD_DEVICE_ATTR_RO(name);
226 static ssize_t mtd_ecc_strength_show(struct device *dev,
227 struct device_attribute *attr, char *buf)
229 struct mtd_info *mtd = dev_get_drvdata(dev);
231 return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
233 MTD_DEVICE_ATTR_RO(ecc_strength);
235 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 struct device_attribute *attr,
239 struct mtd_info *mtd = dev_get_drvdata(dev);
241 return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
244 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 struct device_attribute *attr,
246 const char *buf, size_t count)
248 struct mtd_info *mtd = dev_get_drvdata(dev);
249 unsigned int bitflip_threshold;
252 retval = kstrtouint(buf, 0, &bitflip_threshold);
256 mtd->bitflip_threshold = bitflip_threshold;
259 MTD_DEVICE_ATTR_RW(bitflip_threshold);
261 static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
264 struct mtd_info *mtd = dev_get_drvdata(dev);
266 return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
269 MTD_DEVICE_ATTR_RO(ecc_step_size);
271 static ssize_t mtd_corrected_bits_show(struct device *dev,
272 struct device_attribute *attr, char *buf)
274 struct mtd_info *mtd = dev_get_drvdata(dev);
275 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
277 return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
279 MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
281 static ssize_t mtd_ecc_failures_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
284 struct mtd_info *mtd = dev_get_drvdata(dev);
285 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
287 return sysfs_emit(buf, "%u\n", ecc_stats->failed);
289 MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
291 static ssize_t mtd_bad_blocks_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
297 return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
299 MTD_DEVICE_ATTR_RO(bad_blocks);
301 static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
304 struct mtd_info *mtd = dev_get_drvdata(dev);
305 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
307 return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
309 MTD_DEVICE_ATTR_RO(bbt_blocks);
311 static struct attribute *mtd_attrs[] = {
313 &dev_attr_flags.attr,
315 &dev_attr_erasesize.attr,
316 &dev_attr_writesize.attr,
317 &dev_attr_subpagesize.attr,
318 &dev_attr_oobsize.attr,
319 &dev_attr_oobavail.attr,
320 &dev_attr_numeraseregions.attr,
322 &dev_attr_ecc_strength.attr,
323 &dev_attr_ecc_step_size.attr,
324 &dev_attr_corrected_bits.attr,
325 &dev_attr_ecc_failures.attr,
326 &dev_attr_bad_blocks.attr,
327 &dev_attr_bbt_blocks.attr,
328 &dev_attr_bitflip_threshold.attr,
331 ATTRIBUTE_GROUPS(mtd);
333 static const struct device_type mtd_devtype = {
335 .groups = mtd_groups,
336 .release = mtd_release,
339 static int mtd_partid_debug_show(struct seq_file *s, void *p)
341 struct mtd_info *mtd = s->private;
343 seq_printf(s, "%s\n", mtd->dbg.partid);
348 DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
350 static int mtd_partname_debug_show(struct seq_file *s, void *p)
352 struct mtd_info *mtd = s->private;
354 seq_printf(s, "%s\n", mtd->dbg.partname);
359 DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
361 static struct dentry *dfs_dir_mtd;
363 static void mtd_debugfs_populate(struct mtd_info *mtd)
365 struct mtd_info *master = mtd_get_master(mtd);
366 struct device *dev = &mtd->dev;
369 if (IS_ERR_OR_NULL(dfs_dir_mtd))
372 root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
373 mtd->dbg.dfs_dir = root;
375 if (master->dbg.partid)
376 debugfs_create_file("partid", 0400, root, master,
377 &mtd_partid_debug_fops);
379 if (master->dbg.partname)
380 debugfs_create_file("partname", 0400, root, master,
381 &mtd_partname_debug_fops);
385 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
389 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
390 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
392 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
395 return NOMMU_MAP_COPY;
398 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
401 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
404 struct mtd_info *mtd;
406 mtd = container_of(n, struct mtd_info, reboot_notifier);
413 * mtd_wunit_to_pairing_info - get pairing information of a wunit
414 * @mtd: pointer to new MTD device info structure
415 * @wunit: write unit we are interested in
416 * @info: returned pairing information
418 * Retrieve pairing information associated with the wunit.
419 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
420 * paired together, and where programming a page may influence the page it is
421 * paired with.
422 * The notion of page is replaced by the term wunit (write-unit) to stay
423 * consistent with the ->writesize field.
425 * The @wunit argument can be extracted from an absolute offset using
426 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
427 * to @wunit.
429 * From the pairing info the MTD user can find all the wunits paired with
430 * @wunit using the following loop:
432 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
433 * info.group = i;
434 * mtd_pairing_info_to_wunit(mtd, &info);
438 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
439 struct mtd_pairing_info *info)
441 struct mtd_info *master = mtd_get_master(mtd);
442 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
444 if (wunit < 0 || wunit >= npairs)
447 if (master->pairing && master->pairing->get_info)
448 return master->pairing->get_info(master, wunit, info);
455 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
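/*
 * Illustrative sketch only (not part of the MTD core): walk every write unit
 * paired with @wunit, using the loop described in the kerneldoc above. What
 * is done with each paired wunit is up to the caller; pr_debug() is just a
 * placeholder.
 */
static void __maybe_unused mtd_example_show_paired_wunits(struct mtd_info *mtd,
							   int wunit)
{
	struct mtd_pairing_info info;
	int i, paired;

	if (mtd_wunit_to_pairing_info(mtd, wunit, &info))
		return;

	for (i = 0; i < mtd_pairing_groups(mtd); i++) {
		info.group = i;
		paired = mtd_pairing_info_to_wunit(mtd, &info);
		if (paired >= 0 && paired != wunit)
			pr_debug("wunit %d is paired with wunit %d\n",
				 wunit, paired);
	}
}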
458 * mtd_pairing_info_to_wunit - get wunit from pairing information
459 * @mtd: pointer to new MTD device info structure
460 * @info: pairing information struct
462 * Returns a positive number representing the wunit associated with the info
463 * struct, or a negative error code.
465 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
466 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
469 * It can also be used to only program the first page of each pair (i.e.
470 * page attached to group 0), which allows one to use an MLC NAND in
471 * software-emulated SLC mode:
473 * info.group = 0;
474 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
475 * for (info.pair = 0; info.pair < npairs; info.pair++) {
476 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
477 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
478 * mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
481 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
482 const struct mtd_pairing_info *info)
484 struct mtd_info *master = mtd_get_master(mtd);
485 int ngroups = mtd_pairing_groups(master);
486 int npairs = mtd_wunit_per_eb(master) / ngroups;
488 if (!info || info->pair < 0 || info->pair >= npairs ||
489 info->group < 0 || info->group >= ngroups)
492 if (master->pairing && master->pairing->get_wunit)
493 return master->pairing->get_wunit(master, info);
497 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
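/*
 * Illustrative sketch only (caller-side helper, not part of the MTD core):
 * program just the group-0 page of each pair in the erase block starting at
 * @blkoffs, i.e. the software-emulated SLC pattern from the kerneldoc above.
 * @buf is assumed to hold npairs * mtd->writesize bytes.
 */
static int __maybe_unused mtd_example_write_slc(struct mtd_info *mtd,
						loff_t blkoffs, const u8 *buf)
{
	struct mtd_pairing_info info = { .group = 0 };
	int npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
	size_t retlen;
	int wunit, ret;

	for (info.pair = 0; info.pair < npairs; info.pair++) {
		wunit = mtd_pairing_info_to_wunit(mtd, &info);
		if (wunit < 0)
			return wunit;

		ret = mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
				mtd->writesize, &retlen,
				buf + (info.pair * mtd->writesize));
		if (ret)
			return ret;
	}

	return 0;
}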
500 * mtd_pairing_groups - get the number of pairing groups
501 * @mtd: pointer to new MTD device info structure
503 * Returns the number of pairing groups.
505 * This number is usually equal to the number of bits exposed by a single
506 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
507 * to iterate over all pages of a given pair.
509 int mtd_pairing_groups(struct mtd_info *mtd)
511 struct mtd_info *master = mtd_get_master(mtd);
513 if (!master->pairing || !master->pairing->ngroups)
516 return master->pairing->ngroups;
518 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
520 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
521 void *val, size_t bytes)
523 struct mtd_info *mtd = priv;
527 err = mtd_read(mtd, offset, bytes, &retlen, val);
528 if (err && err != -EUCLEAN)
531 return retlen == bytes ? 0 : -EIO;
534 static int mtd_nvmem_add(struct mtd_info *mtd)
536 struct device_node *node = mtd_get_of_node(mtd);
537 struct nvmem_config config = {};
540 config.dev = &mtd->dev;
541 config.name = dev_name(&mtd->dev);
542 config.owner = THIS_MODULE;
543 config.reg_read = mtd_nvmem_reg_read;
544 config.size = mtd->size;
545 config.word_size = 1;
547 config.read_only = true;
548 config.root_only = true;
549 config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
552 mtd->nvmem = nvmem_register(&config);
553 if (IS_ERR(mtd->nvmem)) {
554 /* Just ignore if there is no NVMEM support in the kernel */
555 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
558 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
559 return PTR_ERR(mtd->nvmem);
567 * add_mtd_device - register an MTD device
568 * @mtd: pointer to new MTD device info structure
570 * Add a device to the list of MTD devices present in the system, and
571 * notify each currently active MTD 'user' of its arrival. Returns
572 * zero on success or non-zero on failure.
575 int add_mtd_device(struct mtd_info *mtd)
577 struct mtd_info *master = mtd_get_master(mtd);
578 struct mtd_notifier *not;
582 * May occur, for instance, on buggy drivers which call
583 * mtd_device_parse_register() multiple times on the same master MTD,
584 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
586 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
589 BUG_ON(mtd->writesize == 0);
592 * MTD drivers should implement ->_{write,read}() or
593 * ->_{write,read}_oob(), but not both.
595 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
596 (mtd->_read && mtd->_read_oob)))
599 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
600 !(mtd->flags & MTD_NO_ERASE)))
604 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
605 * master is an MLC NAND and has a proper pairing scheme defined.
606 * We also reject masters that implement ->_writev() for now, because
607 * NAND controller drivers don't implement this hook, and adding the
608 * SLC -> MLC address/length conversion to this path is useless if we
609 * don't have a user.
611 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
612 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
613 !master->pairing || master->_writev))
616 mutex_lock(&mtd_table_mutex);
618 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
627 /* default value if not set by driver */
628 if (mtd->bitflip_threshold == 0)
629 mtd->bitflip_threshold = mtd->ecc_strength;
631 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
632 int ngroups = mtd_pairing_groups(master);
634 mtd->erasesize /= ngroups;
635 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
639 if (is_power_of_2(mtd->erasesize))
640 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
642 mtd->erasesize_shift = 0;
644 if (is_power_of_2(mtd->writesize))
645 mtd->writesize_shift = ffs(mtd->writesize) - 1;
647 mtd->writesize_shift = 0;
649 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
650 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
652 /* Some chips always power up locked. Unlock them now */
653 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
654 error = mtd_unlock(mtd, 0, mtd->size);
655 if (error && error != -EOPNOTSUPP)
657 "%s: unlock failed, writes may not work\n",
659 /* Ignore unlock failures? */
663 /* Caller should have set dev.parent to match the
664 * physical device, if appropriate.
666 mtd->dev.type = &mtd_devtype;
667 mtd->dev.class = &mtd_class;
668 mtd->dev.devt = MTD_DEVT(i);
669 dev_set_name(&mtd->dev, "mtd%d", i);
670 dev_set_drvdata(&mtd->dev, mtd);
671 of_node_get(mtd_get_of_node(mtd));
672 error = device_register(&mtd->dev);
676 /* Add the nvmem provider */
677 error = mtd_nvmem_add(mtd);
681 mtd_debugfs_populate(mtd);
683 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
686 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
687 /* No need to get a refcount on the module containing
688 the notifier, since we hold the mtd_table_mutex */
689 list_for_each_entry(not, &mtd_notifiers, list)
692 mutex_unlock(&mtd_table_mutex);
693 /* We _know_ we aren't being removed, because
694 our caller is still holding us here. So none
695 of this try_ nonsense, and no bitching about it
697 __module_get(THIS_MODULE);
701 device_unregister(&mtd->dev);
703 of_node_put(mtd_get_of_node(mtd));
704 idr_remove(&mtd_idr, i);
706 mutex_unlock(&mtd_table_mutex);
711 * del_mtd_device - unregister an MTD device
712 * @mtd: pointer to MTD device info structure
714 * Remove a device from the list of MTD devices present in the system,
715 * and notify each currently active MTD 'user' of its departure.
716 * Returns zero on success or 1 on failure, which currently will happen
717 * if the requested device does not appear to be present in the list.
720 int del_mtd_device(struct mtd_info *mtd)
723 struct mtd_notifier *not;
725 mutex_lock(&mtd_table_mutex);
727 if (idr_find(&mtd_idr, mtd->index) != mtd) {
732 /* No need to get a refcount on the module containing
733 the notifier, since we hold the mtd_table_mutex */
734 list_for_each_entry(not, &mtd_notifiers, list)
738 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
739 mtd->index, mtd->name, mtd->usecount);
742 debugfs_remove_recursive(mtd->dbg.dfs_dir);
744 /* Try to remove the NVMEM provider */
746 nvmem_unregister(mtd->nvmem);
748 device_unregister(&mtd->dev);
750 /* Clear dev so mtd can be safely re-registered later if desired */
751 memset(&mtd->dev, 0, sizeof(mtd->dev));
753 idr_remove(&mtd_idr, mtd->index);
754 of_node_put(mtd_get_of_node(mtd));
756 module_put(THIS_MODULE);
761 mutex_unlock(&mtd_table_mutex);
766 * Set a few defaults based on the parent devices, if not provided by the
767 * driver.
769 static void mtd_set_dev_defaults(struct mtd_info *mtd)
771 if (mtd->dev.parent) {
772 if (!mtd->owner && mtd->dev.parent->driver)
773 mtd->owner = mtd->dev.parent->driver->owner;
775 mtd->name = dev_name(mtd->dev.parent);
777 pr_debug("mtd device won't show a device symlink in sysfs\n");
780 INIT_LIST_HEAD(&mtd->partitions);
781 mutex_init(&mtd->master.partitions_lock);
782 mutex_init(&mtd->master.chrdev_lock);
785 static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
787 struct otp_info *info;
793 info = kmalloc(PAGE_SIZE, GFP_KERNEL);
798 ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
800 ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
804 for (i = 0; i < retlen / sizeof(*info); i++)
805 size += info[i].length;
813 /* ENODATA means there is no OTP region. */
814 return ret == -ENODATA ? 0 : ret;
817 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
818 const char *compatible,
820 nvmem_reg_read_t reg_read)
822 struct nvmem_device *nvmem = NULL;
823 struct nvmem_config config = {};
824 struct device_node *np;
826 /* DT binding is optional */
827 np = of_get_compatible_child(mtd->dev.of_node, compatible);
829 /* OTP nvmem will be registered on the physical device */
830 config.dev = mtd->dev.parent;
831 config.name = kasprintf(GFP_KERNEL, "%s-%s", dev_name(&mtd->dev), compatible);
832 config.id = NVMEM_DEVID_NONE;
833 config.owner = THIS_MODULE;
834 config.type = NVMEM_TYPE_OTP;
835 config.root_only = true;
836 config.reg_read = reg_read;
841 nvmem = nvmem_register(&config);
842 /* Just ignore if there is no NVMEM support in the kernel */
843 if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
852 static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
853 void *val, size_t bytes)
855 struct mtd_info *mtd = priv;
859 ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
863 return retlen == bytes ? 0 : -EIO;
866 static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
867 void *val, size_t bytes)
869 struct mtd_info *mtd = priv;
873 ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
877 return retlen == bytes ? 0 : -EIO;
880 static int mtd_otp_nvmem_add(struct mtd_info *mtd)
882 struct nvmem_device *nvmem;
886 if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
887 size = mtd_otp_size(mtd, true);
892 nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
893 mtd_nvmem_user_otp_reg_read);
895 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
896 return PTR_ERR(nvmem);
898 mtd->otp_user_nvmem = nvmem;
902 if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
903 size = mtd_otp_size(mtd, false);
910 nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
911 mtd_nvmem_fact_otp_reg_read);
913 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
914 err = PTR_ERR(nvmem);
917 mtd->otp_factory_nvmem = nvmem;
924 if (mtd->otp_user_nvmem)
925 nvmem_unregister(mtd->otp_user_nvmem);
930 * mtd_device_parse_register - parse partitions and register an MTD device.
932 * @mtd: the MTD device to register
933 * @types: the list of MTD partition probes to try, see
934 * 'parse_mtd_partitions()' for more information
935 * @parser_data: MTD partition parser-specific data
936 * @parts: fallback partition information to register, if parsing fails;
937 * only valid if %nr_parts > %0
938 * @nr_parts: the number of partitions in parts, if zero then the full
939 * MTD device is registered if no partition info is found
941 * This function aggregates MTD partition parsing (done by
942 * 'parse_mtd_partitions()') and MTD device and partition registration. It
943 * basically follows the most common pattern found in many MTD drivers:
945 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
946 *   registered first.
947 * * Then it tries to probe partitions on MTD device @mtd using parsers
948 *   specified in @types (if @types is %NULL, then the default list of parsers
949 *   is used, see 'parse_mtd_partitions()' for more information). If none are
950 *   found, this function tries to fall back to the information specified in
951 *   @parts/@nr_parts.
952 * * If no partitions were found, this function just registers the MTD device
953 *   @mtd and exits.
955 * Returns zero in case of success and a negative error code in case of failure.
957 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
958 struct mtd_part_parser_data *parser_data,
959 const struct mtd_partition *parts,
964 mtd_set_dev_defaults(mtd);
966 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
967 ret = add_mtd_device(mtd);
972 /* Prefer parsed partitions over driver-provided fallback */
973 ret = parse_mtd_partitions(mtd, types, parser_data);
974 if (ret == -EPROBE_DEFER)
980 ret = add_mtd_partitions(mtd, parts, nr_parts);
981 else if (!device_is_registered(&mtd->dev))
982 ret = add_mtd_device(mtd);
990 * FIXME: some drivers unfortunately call this function more than once.
991 * So we have to check if we've already assigned the reboot notifier.
993 * Generally, we can make multiple calls work for most cases, but it
994 * does cause problems with parse_mtd_partitions() above (e.g.,
995 * cmdlineparts will register partitions more than once).
997 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
998 "MTD already registered\n");
999 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1000 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1001 register_reboot_notifier(&mtd->reboot_notifier);
1004 ret = mtd_otp_nvmem_add(mtd);
1007 if (ret && device_is_registered(&mtd->dev))
1008 del_mtd_device(mtd);
1012 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
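/*
 * Illustrative sketch only: the typical driver-side call described in the
 * kerneldoc above. The probe list (NULL means "default parsers") and the
 * fallback partition table are made up for the example; real drivers pass
 * whatever matches their hardware.
 */
static int __maybe_unused mtd_example_register(struct mtd_info *mtd)
{
	static const struct mtd_partition fallback_parts[] = {
		{ .name = "boot", .offset = 0, .size = 0x100000 },
		{ .name = "data", .offset = MTDPART_OFS_APPEND,
		  .size = MTDPART_SIZ_FULL },
	};

	return mtd_device_parse_register(mtd, NULL, NULL, fallback_parts,
					 ARRAY_SIZE(fallback_parts));
}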
1015 * mtd_device_unregister - unregister an existing MTD device.
1017 * @master: the MTD device to unregister. This will unregister both the master
1018 * and any partitions if registered.
1020 int mtd_device_unregister(struct mtd_info *master)
1024 if (master->_reboot) {
1025 unregister_reboot_notifier(&master->reboot_notifier);
1026 memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1029 if (master->otp_user_nvmem)
1030 nvmem_unregister(master->otp_user_nvmem);
1032 if (master->otp_factory_nvmem)
1033 nvmem_unregister(master->otp_factory_nvmem);
1035 err = del_mtd_partitions(master);
1039 if (!device_is_registered(&master->dev))
1042 return del_mtd_device(master);
1044 EXPORT_SYMBOL_GPL(mtd_device_unregister);
1047 * register_mtd_user - register a 'user' of MTD devices.
1048 * @new: pointer to notifier info structure
1050 * Registers a pair of callback functions to be called upon addition
1051 * or removal of MTD devices. Causes the 'add' callback to be immediately
1052 * invoked for each MTD device currently present in the system.
1054 void register_mtd_user (struct mtd_notifier *new)
1056 struct mtd_info *mtd;
1058 mutex_lock(&mtd_table_mutex);
1060 list_add(&new->list, &mtd_notifiers);
1062 __module_get(THIS_MODULE);
1064 mtd_for_each_device(mtd)
1067 mutex_unlock(&mtd_table_mutex);
1069 EXPORT_SYMBOL_GPL(register_mtd_user);
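/*
 * Illustrative sketch only: a minimal MTD 'user'. register_mtd_user() invokes
 * ->add() immediately for every device already present and again for each
 * device added later; ->remove() mirrors that on departure.
 */
static void mtd_example_added(struct mtd_info *mtd)
{
	pr_debug("mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void mtd_example_removed(struct mtd_info *mtd)
{
	pr_debug("mtd%d (%s) is going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier mtd_example_notifier __maybe_unused = {
	.add	= mtd_example_added,
	.remove	= mtd_example_removed,
};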
1072 * unregister_mtd_user - unregister a 'user' of MTD devices.
1073 * @old: pointer to notifier info structure
1075 * Removes a callback function pair from the list of 'users' to be
1076 * notified upon addition or removal of MTD devices. Causes the
1077 * 'remove' callback to be immediately invoked for each MTD device
1078 * currently present in the system.
1080 int unregister_mtd_user (struct mtd_notifier *old)
1082 struct mtd_info *mtd;
1084 mutex_lock(&mtd_table_mutex);
1086 module_put(THIS_MODULE);
1088 mtd_for_each_device(mtd)
1091 list_del(&old->list);
1092 mutex_unlock(&mtd_table_mutex);
1095 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1098 * get_mtd_device - obtain a validated handle for an MTD device
1099 * @mtd: last known address of the required MTD device
1100 * @num: internal device number of the required MTD device
1102 * Given a number and NULL address, return the num'th entry in the device
1103 * table, if any. Given an address and num == -1, search the device table
1104 * for a device with that address and return it if it's still present. Given
1105 * both, return the num'th driver only if its address matches. Return an
1106 * error code if not.
1108 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1110 struct mtd_info *ret = NULL, *other;
1113 mutex_lock(&mtd_table_mutex);
1116 mtd_for_each_device(other) {
1122 } else if (num >= 0) {
1123 ret = idr_find(&mtd_idr, num);
1124 if (mtd && mtd != ret)
1133 err = __get_mtd_device(ret);
1137 mutex_unlock(&mtd_table_mutex);
1140 EXPORT_SYMBOL_GPL(get_mtd_device);
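/*
 * Illustrative sketch only: look up mtd0 by number, read its first @len bytes
 * and drop the reference again. -EUCLEAN from mtd_read() means the data was
 * corrected and is still usable, so it is not treated as an error here.
 */
static int __maybe_unused mtd_example_peek(u8 *buf, size_t len)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);
	size_t retlen;
	int ret;

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	ret = mtd_read(mtd, 0, len, &retlen, buf);
	if (mtd_is_bitflip(ret))
		ret = 0;

	put_mtd_device(mtd);
	return ret;
}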
1143 int __get_mtd_device(struct mtd_info *mtd)
1145 struct mtd_info *master = mtd_get_master(mtd);
1148 if (!try_module_get(master->owner))
1151 if (master->_get_device) {
1152 err = master->_get_device(mtd);
1155 module_put(master->owner);
1162 while (mtd->parent) {
1169 EXPORT_SYMBOL_GPL(__get_mtd_device);
1172 * get_mtd_device_nm - obtain a validated handle for an MTD device by
1174 * @name: MTD device name to open
1176 * This function returns the MTD device description structure in case of
1177 * success and an error pointer in case of failure.
1179 struct mtd_info *get_mtd_device_nm(const char *name)
1182 struct mtd_info *mtd = NULL, *other;
1184 mutex_lock(&mtd_table_mutex);
1186 mtd_for_each_device(other) {
1187 if (!strcmp(name, other->name)) {
1196 err = __get_mtd_device(mtd);
1200 mutex_unlock(&mtd_table_mutex);
1204 mutex_unlock(&mtd_table_mutex);
1205 return ERR_PTR(err);
1207 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1209 void put_mtd_device(struct mtd_info *mtd)
1211 mutex_lock(&mtd_table_mutex);
1212 __put_mtd_device(mtd);
1213 mutex_unlock(&mtd_table_mutex);
1216 EXPORT_SYMBOL_GPL(put_mtd_device);
1218 void __put_mtd_device(struct mtd_info *mtd)
1220 struct mtd_info *master = mtd_get_master(mtd);
1222 while (mtd->parent) {
1224 BUG_ON(mtd->usecount < 0);
1230 if (master->_put_device)
1231 master->_put_device(master);
1233 module_put(master->owner);
1235 EXPORT_SYMBOL_GPL(__put_mtd_device);
1238 * Erase is a synchronous operation. Device drivers are expected to return a
1239 * negative error code if the operation failed and update instr->fail_addr
1240 * to point to the portion that was not properly erased.
1242 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1244 struct mtd_info *master = mtd_get_master(mtd);
1245 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1246 struct erase_info adjinstr;
1249 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1252 if (!mtd->erasesize || !master->_erase)
1255 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1257 if (!(mtd->flags & MTD_WRITEABLE))
1263 ledtrig_mtd_activity();
1265 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1266 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1268 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1269 master->erasesize) -
1273 adjinstr.addr += mst_ofs;
1275 ret = master->_erase(master, &adjinstr);
1277 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1278 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1279 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1280 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1282 instr->fail_addr *= mtd->erasesize;
1288 EXPORT_SYMBOL_GPL(mtd_erase);
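/*
 * Illustrative sketch only: synchronously erase one erase block at @ofs,
 * following the calling convention described in the comment above mtd_erase().
 */
static int __maybe_unused mtd_example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info ei = {
		.addr = ofs,
		.len  = mtd->erasesize,
	};
	int ret;

	ret = mtd_erase(mtd, &ei);
	if (ret)
		pr_debug("erase failed around 0x%llx\n",
			 (unsigned long long)ei.fail_addr);

	return ret;
}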
1291 * This stuff is for eXecute-In-Place. phys is optional and may be set to NULL.
1293 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1294 void **virt, resource_size_t *phys)
1296 struct mtd_info *master = mtd_get_master(mtd);
1302 if (!master->_point)
1304 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1309 from = mtd_get_master_ofs(mtd, from);
1310 return master->_point(master, from, len, retlen, virt, phys);
1312 EXPORT_SYMBOL_GPL(mtd_point);
1314 /* We probably shouldn't allow XIP if the unpoint isn't NULL */
1315 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1317 struct mtd_info *master = mtd_get_master(mtd);
1319 if (!master->_unpoint)
1321 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1325 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1327 EXPORT_SYMBOL_GPL(mtd_unpoint);
1330 * Allow NOMMU mmap() to directly map the device (if not NULL)
1331 * - return the address to which the offset maps
1332 * - return -ENOSYS to indicate refusal to do the mapping
1334 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1335 unsigned long offset, unsigned long flags)
1341 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1344 if (retlen != len) {
1345 mtd_unpoint(mtd, offset, retlen);
1348 return (unsigned long)virt;
1350 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1352 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1353 const struct mtd_ecc_stats *old_stats)
1355 struct mtd_ecc_stats diff;
1360 diff = master->ecc_stats;
1361 diff.failed -= old_stats->failed;
1362 diff.corrected -= old_stats->corrected;
1364 while (mtd->parent) {
1365 mtd->ecc_stats.failed += diff.failed;
1366 mtd->ecc_stats.corrected += diff.corrected;
1371 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1374 struct mtd_oob_ops ops = {
1380 ret = mtd_read_oob(mtd, from, &ops);
1381 *retlen = ops.retlen;
1385 EXPORT_SYMBOL_GPL(mtd_read);
1387 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1390 struct mtd_oob_ops ops = {
1392 .datbuf = (u8 *)buf,
1396 ret = mtd_write_oob(mtd, to, &ops);
1397 *retlen = ops.retlen;
1401 EXPORT_SYMBOL_GPL(mtd_write);
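/*
 * Illustrative sketch only: copy one page with the plain mtd_read()/mtd_write()
 * helpers above. A bitflip return (-EUCLEAN) from mtd_read() still delivers
 * valid data, so only real errors and short reads abort the copy.
 */
static int __maybe_unused mtd_example_copy_page(struct mtd_info *mtd,
						loff_t from, loff_t to)
{
	size_t retlen;
	u8 *buf;
	int ret;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = mtd_read(mtd, from, mtd->writesize, &retlen, buf);
	if (ret && !mtd_is_bitflip(ret))
		goto out;
	if (retlen != mtd->writesize) {
		ret = -EIO;
		goto out;
	}

	ret = mtd_write(mtd, to, mtd->writesize, &retlen, buf);
out:
	kfree(buf);
	return ret;
}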
1404 * In blackbox flight recorder like scenarios we want to make successful writes
1405 * in interrupt context. panic_write() is only intended to be called when it's
1406 * known the kernel is about to panic and we need the write to succeed. Since
1407 * the kernel is not going to be running for much longer, this function can
1408 * break locks and delay to ensure the write succeeds (but not sleep).
1410 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1413 struct mtd_info *master = mtd_get_master(mtd);
1416 if (!master->_panic_write)
1418 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1420 if (!(mtd->flags & MTD_WRITEABLE))
1424 if (!master->oops_panic_write)
1425 master->oops_panic_write = true;
1427 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1430 EXPORT_SYMBOL_GPL(mtd_panic_write);
1432 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1433 struct mtd_oob_ops *ops)
1436 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1437 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1438 * this case.
1446 if (offs < 0 || offs + ops->len > mtd->size)
1452 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1455 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1456 mtd_div_by_ws(offs, mtd)) *
1457 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1458 if (ops->ooblen > maxooblen)
1465 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1466 struct mtd_oob_ops *ops)
1468 struct mtd_info *master = mtd_get_master(mtd);
1471 from = mtd_get_master_ofs(mtd, from);
1472 if (master->_read_oob)
1473 ret = master->_read_oob(master, from, ops);
1475 ret = master->_read(master, from, ops->len, &ops->retlen,
1481 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1482 struct mtd_oob_ops *ops)
1484 struct mtd_info *master = mtd_get_master(mtd);
1487 to = mtd_get_master_ofs(mtd, to);
1488 if (master->_write_oob)
1489 ret = master->_write_oob(master, to, ops);
1491 ret = master->_write(master, to, ops->len, &ops->retlen,
1497 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1498 struct mtd_oob_ops *ops)
1500 struct mtd_info *master = mtd_get_master(mtd);
1501 int ngroups = mtd_pairing_groups(master);
1502 int npairs = mtd_wunit_per_eb(master) / ngroups;
1503 struct mtd_oob_ops adjops = *ops;
1504 unsigned int wunit, oobavail;
1505 struct mtd_pairing_info info;
1506 int max_bitflips = 0;
1510 ebofs = mtd_mod_by_eb(start, mtd);
1511 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1513 info.pair = mtd_div_by_ws(ebofs, mtd);
1514 pageofs = mtd_mod_by_ws(ebofs, mtd);
1515 oobavail = mtd_oobavail(mtd, ops);
1517 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1520 if (info.pair >= npairs) {
1522 base += master->erasesize;
1525 wunit = mtd_pairing_info_to_wunit(master, &info);
1526 pos = mtd_wunit_to_offset(mtd, base, wunit);
1528 adjops.len = ops->len - ops->retlen;
1529 if (adjops.len > mtd->writesize - pageofs)
1530 adjops.len = mtd->writesize - pageofs;
1532 adjops.ooblen = ops->ooblen - ops->oobretlen;
1533 if (adjops.ooblen > oobavail - adjops.ooboffs)
1534 adjops.ooblen = oobavail - adjops.ooboffs;
1537 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1539 max_bitflips = max(max_bitflips, ret);
1541 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1547 max_bitflips = max(max_bitflips, ret);
1548 ops->retlen += adjops.retlen;
1549 ops->oobretlen += adjops.oobretlen;
1550 adjops.datbuf += adjops.retlen;
1551 adjops.oobbuf += adjops.oobretlen;
1557 return max_bitflips;
1560 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1562 struct mtd_info *master = mtd_get_master(mtd);
1563 struct mtd_ecc_stats old_stats = master->ecc_stats;
1566 ops->retlen = ops->oobretlen = 0;
1568 ret_code = mtd_check_oob_ops(mtd, from, ops);
1572 ledtrig_mtd_activity();
1574 /* Check the validity of a potential fallback on mtd->_read */
1575 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1578 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1579 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1581 ret_code = mtd_read_oob_std(mtd, from, ops);
1583 mtd_update_ecc_stats(mtd, master, &old_stats);
1586 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1587 * similar to mtd->_read(), returning a non-negative integer
1588 * representing max bitflips. In other cases, mtd->_read_oob() may
1589 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1591 if (unlikely(ret_code < 0))
1593 if (mtd->ecc_strength == 0)
1594 return 0; /* device lacks ecc */
1595 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1597 EXPORT_SYMBOL_GPL(mtd_read_oob);
1599 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1600 struct mtd_oob_ops *ops)
1602 struct mtd_info *master = mtd_get_master(mtd);
1605 ops->retlen = ops->oobretlen = 0;
1607 if (!(mtd->flags & MTD_WRITEABLE))
1610 ret = mtd_check_oob_ops(mtd, to, ops);
1614 ledtrig_mtd_activity();
1616 /* Check the validity of a potential fallback on mtd->_write */
1617 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1620 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1621 return mtd_io_emulated_slc(mtd, to, false, ops);
1623 return mtd_write_oob_std(mtd, to, ops);
1625 EXPORT_SYMBOL_GPL(mtd_write_oob);
1628 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1629 * @mtd: MTD device structure
1630 * @section: ECC section. Depending on the layout you may have all the ECC
1631 * bytes stored in a single contiguous section, or one section
1632 * per ECC chunk (and sometimes several sections for a single ECC
1633 * chunk)
1634 * @oobecc: OOB region struct filled with the appropriate ECC position
1635 * information
1637 * This function returns ECC section information in the OOB area. If you want
1638 * to get all the ECC bytes information, then you should call
1639 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1641 * Returns zero on success, a negative error code otherwise.
1643 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1644 struct mtd_oob_region *oobecc)
1646 struct mtd_info *master = mtd_get_master(mtd);
1648 memset(oobecc, 0, sizeof(*oobecc));
1650 if (!master || section < 0)
1653 if (!master->ooblayout || !master->ooblayout->ecc)
1656 return master->ooblayout->ecc(master, section, oobecc);
1658 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
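/*
 * Illustrative sketch only: walk every ECC section of the OOB layout with the
 * "call until -ERANGE" pattern from the kerneldoc above, summing the ECC byte
 * count by hand (mtd_ooblayout_count_eccbytes() below does the same thing).
 */
static int __maybe_unused mtd_example_sum_eccbytes(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section = 0, total = 0, ret;

	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &region)))
		total += region.length;

	return ret == -ERANGE ? total : ret;
}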
1661 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1663 * @mtd: MTD device structure
1664 * @section: Free section you are interested in. Depending on the layout
1665 * you may have all the free bytes stored in a single contiguous
1666 * section, or one section per ECC chunk plus an extra section
1667 * for the remaining bytes (or other funky layout).
1668 * @oobfree: OOB region struct filled with the appropriate free position
1669 * information
1671 * This function returns free bytes position in the OOB area. If you want
1672 * to get all the free bytes information, then you should call
1673 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1675 * Returns zero on success, a negative error code otherwise.
1677 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1678 struct mtd_oob_region *oobfree)
1680 struct mtd_info *master = mtd_get_master(mtd);
1682 memset(oobfree, 0, sizeof(*oobfree));
1684 if (!master || section < 0)
1687 if (!master->ooblayout || !master->ooblayout->free)
1690 return master->ooblayout->free(master, section, oobfree);
1692 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1695 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1696 * @mtd: mtd info structure
1697 * @byte: the byte we are searching for
1698 * @sectionp: pointer where the section id will be stored
1699 * @oobregion: used to retrieve the ECC position
1700 * @iter: iterator function. Should be either mtd_ooblayout_free or
1701 * mtd_ooblayout_ecc depending on the region type you're searching for
1703 * This function returns the section id and oobregion information of a
1704 * specific byte. For example, say you want to know where the 4th ECC byte is
1705 * stored, you'll use:
1707 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1709 * Returns zero on success, a negative error code otherwise.
1711 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1712 int *sectionp, struct mtd_oob_region *oobregion,
1713 int (*iter)(struct mtd_info *,
1715 struct mtd_oob_region *oobregion))
1717 int pos = 0, ret, section = 0;
1719 memset(oobregion, 0, sizeof(*oobregion));
1722 ret = iter(mtd, section, oobregion);
1726 if (pos + oobregion->length > byte)
1729 pos += oobregion->length;
1734 * Adjust region info to make it start at the beginning of the
1735 * specified byte.
1737 oobregion->offset += byte - pos;
1738 oobregion->length -= byte - pos;
1739 *sectionp = section;
1745 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1747 * @mtd: mtd info structure
1748 * @eccbyte: the byte we are searching for
1749 * @section: pointer where the section id will be stored
1750 * @oobregion: OOB region information
1752 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1755 * Returns zero on success, a negative error code otherwise.
1757 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1759 struct mtd_oob_region *oobregion)
1761 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1764 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1767 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1768 * @mtd: mtd info structure
1769 * @buf: destination buffer to store OOB bytes
1770 * @oobbuf: OOB buffer
1771 * @start: first byte to retrieve
1772 * @nbytes: number of bytes to retrieve
1773 * @iter: section iterator
1775 * Extract bytes attached to a specific category (ECC or free)
1776 * from the OOB buffer and copy them into buf.
1778 * Returns zero on success, a negative error code otherwise.
1780 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1781 const u8 *oobbuf, int start, int nbytes,
1782 int (*iter)(struct mtd_info *,
1784 struct mtd_oob_region *oobregion))
1786 struct mtd_oob_region oobregion;
1789 ret = mtd_ooblayout_find_region(mtd, start, &section,
1795 cnt = min_t(int, nbytes, oobregion.length);
1796 memcpy(buf, oobbuf + oobregion.offset, cnt);
1803 ret = iter(mtd, ++section, &oobregion);
1810 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1811 * @mtd: mtd info structure
1812 * @buf: source buffer to get OOB bytes from
1813 * @oobbuf: OOB buffer
1814 * @start: first OOB byte to set
1815 * @nbytes: number of OOB bytes to set
1816 * @iter: section iterator
1818 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1819 * is selected by passing the appropriate iterator.
1821 * Returns zero on success, a negative error code otherwise.
1823 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1824 u8 *oobbuf, int start, int nbytes,
1825 int (*iter)(struct mtd_info *,
1827 struct mtd_oob_region *oobregion))
1829 struct mtd_oob_region oobregion;
1832 ret = mtd_ooblayout_find_region(mtd, start, &section,
1838 cnt = min_t(int, nbytes, oobregion.length);
1839 memcpy(oobbuf + oobregion.offset, buf, cnt);
1846 ret = iter(mtd, ++section, &oobregion);
1853 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1854 * @mtd: mtd info structure
1855 * @iter: category iterator
1857 * Count the number of bytes in a given category.
1859 * Returns a positive value on success, a negative error code otherwise.
1861 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1862 int (*iter)(struct mtd_info *,
1864 struct mtd_oob_region *oobregion))
1866 struct mtd_oob_region oobregion;
1867 int section = 0, ret, nbytes = 0;
1870 ret = iter(mtd, section++, &oobregion);
1877 nbytes += oobregion.length;
1884 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1885 * @mtd: mtd info structure
1886 * @eccbuf: destination buffer to store ECC bytes
1887 * @oobbuf: OOB buffer
1888 * @start: first ECC byte to retrieve
1889 * @nbytes: number of ECC bytes to retrieve
1891 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1893 * Returns zero on success, a negative error code otherwise.
1895 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1896 const u8 *oobbuf, int start, int nbytes)
1898 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1901 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
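/*
 * Illustrative sketch only: given a raw OOB buffer that has already been read
 * (e.g. with mtd_read_oob()), copy out every ECC byte in layout order.
 * @eccbuf is assumed to hold at least mtd_ooblayout_count_eccbytes(mtd) bytes.
 */
static int __maybe_unused mtd_example_extract_ecc(struct mtd_info *mtd,
						  const u8 *oobbuf, u8 *eccbuf)
{
	int nbytes = mtd_ooblayout_count_eccbytes(mtd);

	if (nbytes < 0)
		return nbytes;

	return mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, nbytes);
}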
1904 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1905 * @mtd: mtd info structure
1906 * @eccbuf: source buffer to get ECC bytes from
1907 * @oobbuf: OOB buffer
1908 * @start: first ECC byte to set
1909 * @nbytes: number of ECC bytes to set
1911 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1913 * Returns zero on success, a negative error code otherwise.
1915 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1916 u8 *oobbuf, int start, int nbytes)
1918 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1921 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1924 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1925 * @mtd: mtd info structure
1926 * @databuf: destination buffer to store data bytes
1927 * @oobbuf: OOB buffer
1928 * @start: first data byte to retrieve
1929 * @nbytes: number of data bytes to retrieve
1931 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1933 * Returns zero on success, a negative error code otherwise.
1935 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1936 const u8 *oobbuf, int start, int nbytes)
1938 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1939 mtd_ooblayout_free);
1941 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1944 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1945 * @mtd: mtd info structure
1946 * @databuf: source buffer to get data bytes from
1947 * @oobbuf: OOB buffer
1948 * @start: first data byte to set
1949 * @nbytes: number of data bytes to set
1951 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1953 * Returns zero on success, a negative error code otherwise.
1955 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1956 u8 *oobbuf, int start, int nbytes)
1958 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1959 mtd_ooblayout_free);
1961 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1964 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1965 * @mtd: mtd info structure
1967 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1969 * Returns the number of free bytes on success, a negative error code otherwise.
1971 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1973 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1975 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1978 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1979 * @mtd: mtd info structure
1981 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1983 * Returns the number of ECC bytes on success, a negative error code otherwise.
1985 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1987 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1989 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
1992 * Method to access the protection register area, present in some flash
1993 * devices. The user data is one time programmable but the factory data is read
1994 * only.
1996 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1997 struct otp_info *buf)
1999 struct mtd_info *master = mtd_get_master(mtd);
2001 if (!master->_get_fact_prot_info)
2005 return master->_get_fact_prot_info(master, len, retlen, buf);
2007 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2009 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2010 size_t *retlen, u_char *buf)
2012 struct mtd_info *master = mtd_get_master(mtd);
2015 if (!master->_read_fact_prot_reg)
2019 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2021 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2023 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2024 struct otp_info *buf)
2026 struct mtd_info *master = mtd_get_master(mtd);
2028 if (!master->_get_user_prot_info)
2032 return master->_get_user_prot_info(master, len, retlen, buf);
2034 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
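/*
 * Illustrative sketch only: list the user OTP regions, the same
 * mtd_get_user_prot_info() pattern used by mtd_otp_size() above, just with a
 * small fixed-size array instead of a PAGE_SIZE allocation.
 */
static void __maybe_unused mtd_example_list_user_otp(struct mtd_info *mtd)
{
	struct otp_info info[8];
	size_t retlen;
	int i, ret;

	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
	if (ret)
		return;

	for (i = 0; i < retlen / sizeof(*info); i++)
		pr_debug("user OTP region at %u: %u bytes, %slocked\n",
			 info[i].start, info[i].length,
			 info[i].locked ? "" : "un");
}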
2036 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2037 size_t *retlen, u_char *buf)
2039 struct mtd_info *master = mtd_get_master(mtd);
2042 if (!master->_read_user_prot_reg)
2046 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2048 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2050 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2051 size_t *retlen, const u_char *buf)
2053 struct mtd_info *master = mtd_get_master(mtd);
2057 if (!master->_write_user_prot_reg)
2061 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2066 * If no data could be written at all, we are out of memory and
2067 * must return -ENOSPC.
2069 return (*retlen) ? 0 : -ENOSPC;
2071 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2073 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2075 struct mtd_info *master = mtd_get_master(mtd);
2077 if (!master->_lock_user_prot_reg)
2081 return master->_lock_user_prot_reg(master, from, len);
2083 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2085 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2087 struct mtd_info *master = mtd_get_master(mtd);
2089 if (!master->_erase_user_prot_reg)
2093 return master->_erase_user_prot_reg(master, from, len);
2095 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2097 /* Chip-supported device locking */
2098 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2100 struct mtd_info *master = mtd_get_master(mtd);
2104 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2109 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2110 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2111 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2114 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2116 EXPORT_SYMBOL_GPL(mtd_lock);
2118 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2120 struct mtd_info *master = mtd_get_master(mtd);
2122 if (!master->_unlock)
2124 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2129 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2130 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2131 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2134 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2136 EXPORT_SYMBOL_GPL(mtd_unlock);
2138 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2140 struct mtd_info *master = mtd_get_master(mtd);
2142 if (!master->_is_locked)
2144 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2149 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2150 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2151 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2154 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2156 EXPORT_SYMBOL_GPL(mtd_is_locked);
2158 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2160 struct mtd_info *master = mtd_get_master(mtd);
2162 if (ofs < 0 || ofs >= mtd->size)
2164 if (!master->_block_isreserved)
2167 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2168 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2170 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2172 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2174 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2176 struct mtd_info *master = mtd_get_master(mtd);
2178 if (ofs < 0 || ofs >= mtd->size)
2180 if (!master->_block_isbad)
2183 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2184 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2186 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2188 EXPORT_SYMBOL_GPL(mtd_block_isbad);
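/*
 * Illustrative sketch only: advance @ofs past any bad blocks, the usual
 * mtd_block_isbad() scan pattern used by flash file systems and block
 * translation layers.
 */
static loff_t __maybe_unused mtd_example_next_good_block(struct mtd_info *mtd,
							 loff_t ofs)
{
	while (ofs < mtd->size && mtd_block_isbad(mtd, ofs) > 0)
		ofs += mtd->erasesize;

	return ofs;
}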
2190 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2192 struct mtd_info *master = mtd_get_master(mtd);
2195 if (!master->_block_markbad)
2197 if (ofs < 0 || ofs >= mtd->size)
2199 if (!(mtd->flags & MTD_WRITEABLE))
2202 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2203 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2205 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2209 while (mtd->parent) {
2210 mtd->ecc_stats.badblocks++;
2216 EXPORT_SYMBOL_GPL(mtd_block_markbad);
2219 * default_mtd_writev - the default writev method
2220 * @mtd: mtd device description object pointer
2221 * @vecs: the vectors to write
2222 * @count: count of vectors in @vecs
2223 * @to: the MTD device offset to write to
2224 * @retlen: on exit contains the count of bytes written to the MTD device.
2226 * This function returns zero in case of success and a negative error code in
2229 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2230 unsigned long count, loff_t to, size_t *retlen)
2233 size_t totlen = 0, thislen;
2236 for (i = 0; i < count; i++) {
2237 if (!vecs[i].iov_len)
2239 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2242 if (ret || thislen != vecs[i].iov_len)
2244 to += vecs[i].iov_len;
2251 * mtd_writev - the vector-based MTD write method
2252 * @mtd: mtd device description object pointer
2253 * @vecs: the vectors to write
2254 * @count: count of vectors in @vecs
2255 * @to: the MTD device offset to write to
2256 * @retlen: on exit contains the count of bytes written to the MTD device.
2258 * This function returns zero in case of success and a negative error code in
2261 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2262 unsigned long count, loff_t to, size_t *retlen)
2264 struct mtd_info *master = mtd_get_master(mtd);
2267 if (!(mtd->flags & MTD_WRITEABLE))
2270 if (!master->_writev)
2271 return default_mtd_writev(mtd, vecs, count, to, retlen);
2273 return master->_writev(master, vecs, count,
2274 mtd_get_master_ofs(mtd, to), retlen);
2276 EXPORT_SYMBOL_GPL(mtd_writev);
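/*
 * Illustrative sketch only: gather two discontiguous buffers into a single
 * MTD write with mtd_writev(). On devices without a ->_writev() hook this
 * goes through default_mtd_writev() above.
 */
static int __maybe_unused mtd_example_gather_write(struct mtd_info *mtd,
						   loff_t to,
						   void *hdr, size_t hdr_len,
						   void *payload, size_t payload_len)
{
	struct kvec vecs[] = {
		{ .iov_base = hdr,	.iov_len = hdr_len },
		{ .iov_base = payload,	.iov_len = payload_len },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdr_len + payload_len)
		ret = -EIO;

	return ret;
}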
2279 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2280 * @mtd: mtd device description object pointer
2281 * @size: a pointer to the ideal or maximum size of the allocation, points
2282 * to the actual allocation size on success.
2284 * This routine attempts to allocate a contiguous kernel buffer up to
2285 * the specified size, backing off the size of the request exponentially
2286 * until the request succeeds or until the allocation size falls below
2287 * the system page size. This attempts to make sure it does not adversely
2288 * impact system performance, so when allocating more than one page, we
2289 * ask the memory allocator to avoid re-trying, swapping, writing back
2290 * or performing I/O.
2292 * Note, this function also makes sure that the allocated buffer is aligned to
2293 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2295 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2296 * to handle smaller (i.e. degraded) buffer allocations under low- or
2297 * fragmented-memory situations where such reduced allocations, from a
2298 * requested ideal, are allowed.
2300 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2302 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2304 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2305 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2308 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2310 while (*size > min_alloc) {
2311 kbuf = kmalloc(*size, flags);
2316 *size = ALIGN(*size, mtd->writesize);
2320 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2321 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2323 return kmalloc(*size, GFP_KERNEL);
2325 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
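/*
 * Illustrative sketch only: ask for one large transfer buffer but accept
 * whatever mtd_kmalloc_up_to() manages to hand back, then loop the reads in
 * chunks of the (possibly reduced) buffer size.
 */
static int __maybe_unused mtd_example_bulk_read(struct mtd_info *mtd, loff_t from,
						size_t total, u8 *dst)
{
	size_t bufsize = total;
	size_t retlen, chunk;
	u8 *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &bufsize);
	if (!buf)
		return -ENOMEM;

	while (total) {
		chunk = min(total, bufsize);
		ret = mtd_read(mtd, from, chunk, &retlen, buf);
		if (ret && !mtd_is_bitflip(ret))
			break;
		ret = 0;

		memcpy(dst, buf, retlen);
		dst += retlen;
		from += retlen;
		total -= retlen;

		if (retlen != chunk) {
			ret = -EIO;
			break;
		}
	}

	kfree(buf);
	return ret;
}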
2327 #ifdef CONFIG_PROC_FS
2329 /*====================================================================*/
2330 /* Support for /proc/mtd */
2332 static int mtd_proc_show(struct seq_file *m, void *v)
2334 struct mtd_info *mtd;
2336 seq_puts(m, "dev: size erasesize name\n");
2337 mutex_lock(&mtd_table_mutex);
2338 mtd_for_each_device(mtd) {
2339 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2340 mtd->index, (unsigned long long)mtd->size,
2341 mtd->erasesize, mtd->name);
2343 mutex_unlock(&mtd_table_mutex);
2346 #endif /* CONFIG_PROC_FS */
2348 /*====================================================================*/
2351 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2353 struct backing_dev_info *bdi;
2356 bdi = bdi_alloc(NUMA_NO_NODE);
2358 return ERR_PTR(-ENOMEM);
2363 * We append a '-0' suffix to the name to get the same name format as we
2364 * used to get. Since this is called only once, we get a unique name.
2366 ret = bdi_register(bdi, "%.28s-0", name);
2370 return ret ? ERR_PTR(ret) : bdi;
2373 char *mtd_expert_analysis_warning =
2374 "Bad block checks have been entirely disabled.\n"
2375 "This is only reserved for post-mortem forensics and debug purposes.\n"
2376 "Never enable this mode if you do not know what you are doing!\n";
2377 EXPORT_SYMBOL_GPL(mtd_expert_analysis_warning);
2378 bool mtd_expert_analysis_mode;
2379 EXPORT_SYMBOL_GPL(mtd_expert_analysis_mode);
2381 static struct proc_dir_entry *proc_mtd;
2383 static int __init init_mtd(void)
2387 ret = class_register(&mtd_class);
2391 mtd_bdi = mtd_bdi_init("mtd");
2392 if (IS_ERR(mtd_bdi)) {
2393 ret = PTR_ERR(mtd_bdi);
2397 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2399 ret = init_mtdchar();
2403 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2404 debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2405 &mtd_expert_analysis_mode);
2411 remove_proc_entry("mtd", NULL);
2414 class_unregister(&mtd_class);
2416 pr_err("Error registering mtd class or bdi: %d\n", ret);
2420 static void __exit cleanup_mtd(void)
2422 debugfs_remove_recursive(dfs_dir_mtd);
2425 remove_proc_entry("mtd", NULL);
2426 class_unregister(&mtd_class);
2427 bdi_unregister(mtd_bdi);
2429 idr_destroy(&mtd_idr);
2432 module_init(init_mtd);
2433 module_exit(cleanup_mtd);
2435 MODULE_LICENSE("GPL");
2436 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2437 MODULE_DESCRIPTION("Core MTD registration and access routines");