// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/leds.h>
#include <linux/debugfs.h>
#include <linux/nvmem-provider.h>
#include <linux/root_dev.h>
#include <linux/error-injection.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"
struct backing_dev_info *mtd_bdi;

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        if (mtd)
                mtd_resume(mtd);
        return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
        .name = "mtd",
        .pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
        return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        dev_t index = MTD_DEVT(mtd->index);

        idr_remove(&mtd_idr, mtd->index);
        of_node_put(mtd_get_of_node(mtd));

        if (mtd_is_partition(mtd))
                release_mtd_partition(mtd);

        /* remove /dev/mtdXro node */
        device_destroy(&mtd_class, index + 1);
}

static void mtd_device_release(struct kref *kref)
{
        struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
        bool is_partition = mtd_is_partition(mtd);

        debugfs_remove_recursive(mtd->dbg.dfs_dir);

        /* Try to remove the NVMEM provider */
        nvmem_unregister(mtd->nvmem);

        device_unregister(&mtd->dev);

        /*
         * Clear dev so mtd can be safely re-registered later if desired.
         * Should not be done for partitions, as they were already
         * destroyed in device_unregister().
         */
        if (!is_partition)
                memset(&mtd->dev, 0, sizeof(mtd->dev));

        module_put(THIS_MODULE);
}

#define MTD_DEVICE_ATTR_RO(name) \
        static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)

#define MTD_DEVICE_ATTR_RW(name) \
        static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
static ssize_t mtd_type_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        char *type;

        switch (mtd->type) {
        case MTD_NANDFLASH:
                type = "nand";
                break;
        case MTD_MLCNANDFLASH:
                type = "mlc-nand";
                break;
        /* the remaining MTD_* types map to their names the same way */
        default:
                type = "unknown";
        }

        return sysfs_emit(buf, "%s\n", type);
}
MTD_DEVICE_ATTR_RO(type);
static ssize_t mtd_flags_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
}
MTD_DEVICE_ATTR_RO(flags);

static ssize_t mtd_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
}
MTD_DEVICE_ATTR_RO(size);

static ssize_t mtd_erasesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
}
MTD_DEVICE_ATTR_RO(erasesize);

static ssize_t mtd_writesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
}
MTD_DEVICE_ATTR_RO(writesize);

static ssize_t mtd_subpagesize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

        return sysfs_emit(buf, "%u\n", subpagesize);
}
MTD_DEVICE_ATTR_RO(subpagesize);

static ssize_t mtd_oobsize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
}
MTD_DEVICE_ATTR_RO(oobsize);

static ssize_t mtd_oobavail_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", mtd->oobavail);
}
MTD_DEVICE_ATTR_RO(oobavail);

static ssize_t mtd_numeraseregions_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
}
MTD_DEVICE_ATTR_RO(numeraseregions);

static ssize_t mtd_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%s\n", mtd->name);
}
MTD_DEVICE_ATTR_RO(name);

static ssize_t mtd_ecc_strength_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
}
MTD_DEVICE_ATTR_RO(ecc_strength);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
                                           struct device_attribute *attr,
                                           const char *buf, size_t count)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        unsigned int bitflip_threshold;
        int retval;

        retval = kstrtouint(buf, 0, &bitflip_threshold);
        if (retval)
                return retval;

        mtd->bitflip_threshold = bitflip_threshold;
        return count;
}
MTD_DEVICE_ATTR_RW(bitflip_threshold);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);

        return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
}
MTD_DEVICE_ATTR_RO(ecc_step_size);

static ssize_t mtd_corrected_bits_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

        return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
}
MTD_DEVICE_ATTR_RO(corrected_bits);      /* ecc stats corrected */

static ssize_t mtd_ecc_failures_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

        return sysfs_emit(buf, "%u\n", ecc_stats->failed);
}
MTD_DEVICE_ATTR_RO(ecc_failures);        /* ecc stats errors */

static ssize_t mtd_bad_blocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

        return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
}
MTD_DEVICE_ATTR_RO(bad_blocks);

static ssize_t mtd_bbt_blocks_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mtd_info *mtd = dev_get_drvdata(dev);
        struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

        return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
}
MTD_DEVICE_ATTR_RO(bbt_blocks);

static struct attribute *mtd_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_flags.attr,
        &dev_attr_size.attr,
        &dev_attr_erasesize.attr,
        &dev_attr_writesize.attr,
        &dev_attr_subpagesize.attr,
        &dev_attr_oobsize.attr,
        &dev_attr_oobavail.attr,
        &dev_attr_numeraseregions.attr,
        &dev_attr_name.attr,
        &dev_attr_ecc_strength.attr,
        &dev_attr_ecc_step_size.attr,
        &dev_attr_corrected_bits.attr,
        &dev_attr_ecc_failures.attr,
        &dev_attr_bad_blocks.attr,
        &dev_attr_bbt_blocks.attr,
        &dev_attr_bitflip_threshold.attr,
        NULL,
};
ATTRIBUTE_GROUPS(mtd);

static const struct device_type mtd_devtype = {
        .name = "mtd",
        .groups = mtd_groups,
        .release = mtd_release,
};
static bool mtd_expert_analysis_mode;

#ifdef CONFIG_DEBUG_FS
bool mtd_check_expert_analysis_mode(void)
{
        const char *mtd_expert_analysis_warning =
                "Bad block checks have been entirely disabled.\n"
                "This is only reserved for post-mortem forensics and debug purposes.\n"
                "Never enable this mode if you do not know what you are doing!\n";

        return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
}
EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
#endif

static struct dentry *dfs_dir_mtd;

static void mtd_debugfs_populate(struct mtd_info *mtd)
{
        struct device *dev = &mtd->dev;

        if (IS_ERR_OR_NULL(dfs_dir_mtd))
                return;

        mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
}

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
        switch (mtd->type) {
        case MTD_RAM:
                return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
                        NOMMU_MAP_READ | NOMMU_MAP_WRITE;
        case MTD_ROM:
                return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
                        NOMMU_MAP_READ;
        default:
                return NOMMU_MAP_COPY;
        }
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif /* CONFIG_MMU */

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
                               void *cmd)
{
        struct mtd_info *mtd;

        mtd = container_of(n, struct mtd_info, reboot_notifier);
        mtd->_reboot(mtd);

        return NOTIFY_DONE;
}
/**
 * mtd_wunit_to_pairing_info - get pairing information of a wunit
 * @mtd: pointer to new MTD device info structure
 * @wunit: write unit we are interested in
 * @info: returned pairing information
 *
 * Retrieve pairing information associated with the wunit.
 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
 * paired together, and where programming a page may influence the page it is
 * paired with.
 * The notion of page is replaced by the term wunit (write-unit) to stay
 * consistent with the ->writesize field.
 *
 * The @wunit argument can be extracted from an absolute offset using
 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
 * to the wunit.
 *
 * From the pairing info the MTD user can find all the wunits paired with
 * @wunit using the following loop:
 *
 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
 *	info.group = i;
 *	mtd_pairing_info_to_wunit(mtd, &info);
 *	...
 * }
 */
int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
                              struct mtd_pairing_info *info)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);

        if (wunit < 0 || wunit >= npairs)
                return -EINVAL;

        if (master->pairing && master->pairing->get_info)
                return master->pairing->get_info(master, wunit, info);

        info->group = 0;
        info->pair = wunit;

        return 0;
}
EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);

/**
 * mtd_pairing_info_to_wunit - get wunit from pairing information
 * @mtd: pointer to new MTD device info structure
 * @info: pairing information struct
 *
 * Returns a positive number representing the wunit associated to the info
 * struct, or a negative error code.
 *
 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
 * for an example).
 *
 * It can also be used to only program the first page of each pair (i.e.
 * page attached to group 0), which allows one to use an MLC NAND in
 * software-emulated SLC mode:
 *
 * info.group = 0;
 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
 * for (info.pair = 0; info.pair < npairs; info.pair++) {
 *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
 *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
 *		  mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
 * }
 */
int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
                              const struct mtd_pairing_info *info)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ngroups = mtd_pairing_groups(master);
        int npairs = mtd_wunit_per_eb(master) / ngroups;

        if (!info || info->pair < 0 || info->pair >= npairs ||
            info->group < 0 || info->group >= ngroups)
                return -EINVAL;

        if (master->pairing && master->pairing->get_wunit)
                return master->pairing->get_wunit(master, info);

        return info->pair;
}
EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);

/**
 * mtd_pairing_groups - get the number of pairing groups
 * @mtd: pointer to new MTD device info structure
 *
 * Returns the number of pairing groups.
 *
 * This number is usually equal to the number of bits exposed by a single
 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
 * to iterate over all pages of a given pair.
 */
int mtd_pairing_groups(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->pairing || !master->pairing->ngroups)
                return 1;

        return master->pairing->ngroups;
}
EXPORT_SYMBOL_GPL(mtd_pairing_groups);
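
/*
 * Illustrative sketch of the pairing helpers above: walk every wunit paired
 * with @wunit. Purely an example; the helper name and the pr_info() output
 * are hypothetical, and it relies only on the functions defined above.
 */
static void __maybe_unused mtd_example_dump_pair(struct mtd_info *mtd, int wunit)
{
        struct mtd_pairing_info info;
        int i;

        if (mtd_wunit_to_pairing_info(mtd, wunit, &info))
                return;

        for (i = 0; i < mtd_pairing_groups(mtd); i++) {
                info.group = i;
                pr_info("wunit %d pairs with wunit %d (group %d)\n",
                        wunit, mtd_pairing_info_to_wunit(mtd, &info), i);
        }
}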
static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
                              void *val, size_t bytes)
{
        struct mtd_info *mtd = priv;
        size_t retlen;
        int err;

        err = mtd_read(mtd, offset, bytes, &retlen, val);
        if (err && err != -EUCLEAN)
                return err;

        return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_add(struct mtd_info *mtd)
{
        struct device_node *node = mtd_get_of_node(mtd);
        struct nvmem_config config = {};

        config.id = NVMEM_DEVID_NONE;
        config.dev = &mtd->dev;
        config.name = dev_name(&mtd->dev);
        config.owner = THIS_MODULE;
        config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
        config.reg_read = mtd_nvmem_reg_read;
        config.size = mtd->size;
        config.word_size = 1;
        config.stride = 1;
        config.read_only = true;
        config.root_only = true;
        config.ignore_wp = true;
        config.priv = mtd;

        mtd->nvmem = nvmem_register(&config);
        if (IS_ERR(mtd->nvmem)) {
                /* Just ignore if there is no NVMEM support in the kernel */
                if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
                        mtd->nvmem = NULL;
                else
                        return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
                                             "Failed to register NVMEM device\n");
        }

        return 0;
}
static void mtd_check_of_node(struct mtd_info *mtd)
{
        struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
        const char *pname, *prefix = "partition-";
        int plen, mtd_name_len, offset, prefix_len;

        /* Check if MTD already has a device node */
        if (mtd_get_of_node(mtd))
                return;

        if (!mtd_is_partition(mtd))
                return;

        parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
        if (!parent_dn)
                return;

        if (mtd_is_partition(mtd->parent))
                partitions = of_node_get(parent_dn);
        else
                partitions = of_get_child_by_name(parent_dn, "partitions");
        if (!partitions)
                goto exit_parent;

        prefix_len = strlen(prefix);
        mtd_name_len = strlen(mtd->name);

        /* Search if a partition is defined with the same name */
        for_each_child_of_node(partitions, mtd_dn) {
                /* Skip partition with no/wrong prefix */
                if (!of_node_name_prefix(mtd_dn, prefix))
                        continue;

                /* Labels have priority. Check that first */
                if (!of_property_read_string(mtd_dn, "label", &pname)) {
                        offset = 0;
                } else {
                        pname = mtd_dn->name;
                        offset = prefix_len;
                }

                plen = strlen(pname) - offset;
                if (plen == mtd_name_len &&
                    !strncmp(mtd->name, pname + offset, plen)) {
                        mtd_set_of_node(mtd, mtd_dn);
                        of_node_put(mtd_dn);
                        break;
                }
        }

        of_node_put(partitions);
exit_parent:
        of_node_put(parent_dn);
}
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */
int add_mtd_device(struct mtd_info *mtd)
{
        struct device_node *np = mtd_get_of_node(mtd);
        struct mtd_info *master = mtd_get_master(mtd);
        struct mtd_notifier *not;
        int i, error, ofidx;

        /*
         * May occur, for instance, on buggy drivers which call
         * mtd_device_parse_register() multiple times on the same master MTD,
         * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
         */
        if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
                return -EEXIST;

        BUG_ON(mtd->writesize == 0);

        /*
         * MTD drivers should implement ->_{write,read}() or
         * ->_{write,read}_oob(), but not both.
         */
        if (WARN_ON((mtd->_write && mtd->_write_oob) ||
                    (mtd->_read && mtd->_read_oob)))
                return -EINVAL;

        if (WARN_ON((!mtd->erasesize || !master->_erase) &&
                    !(mtd->flags & MTD_NO_ERASE)))
                return -EINVAL;

        /*
         * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
         * master is an MLC NAND and has a proper pairing scheme defined.
         * We also reject masters that implement ->_writev() for now, because
         * NAND controller drivers don't implement this hook, and adding the
         * SLC -> MLC address/length conversion to this path is useless if we
         * don't have a user.
         */
        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
            (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
             !master->pairing || master->_writev))
                return -EINVAL;

        mutex_lock(&mtd_table_mutex);

        ofidx = -1;
        if (np)
                ofidx = of_alias_get_id(np, "mtd");
        if (ofidx >= 0)
                i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
        else
                i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
        if (i < 0) {
                error = i;
                goto fail_locked;
        }

        mtd->index = i;
        kref_init(&mtd->refcnt);

        /* default value if not set by driver */
        if (mtd->bitflip_threshold == 0)
                mtd->bitflip_threshold = mtd->ecc_strength;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                int ngroups = mtd_pairing_groups(master);

                mtd->erasesize /= ngroups;
                mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
                            mtd->erasesize;
        }

        if (is_power_of_2(mtd->erasesize))
                mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        else
                mtd->erasesize_shift = 0;

        if (is_power_of_2(mtd->writesize))
                mtd->writesize_shift = ffs(mtd->writesize) - 1;
        else
                mtd->writesize_shift = 0;

        mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
        mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

        /* Some chips always power up locked. Unlock them now */
        if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
                error = mtd_unlock(mtd, 0, mtd->size);
                if (error && error != -EOPNOTSUPP)
                        printk(KERN_WARNING
                               "%s: unlock failed, writes may not work\n",
                               mtd->name);
                /* Ignore unlock failures? */
                error = 0;
        }

        /* Caller should have set dev.parent to match the
         * physical device, if appropriate.
         */
        mtd->dev.type = &mtd_devtype;
        mtd->dev.class = &mtd_class;
        mtd->dev.devt = MTD_DEVT(i);
        dev_set_name(&mtd->dev, "mtd%d", i);
        dev_set_drvdata(&mtd->dev, mtd);
        mtd_check_of_node(mtd);
        of_node_get(mtd_get_of_node(mtd));
        error = device_register(&mtd->dev);
        if (error) {
                put_device(&mtd->dev);
                goto fail_added;
        }

        /* Add the nvmem provider */
        error = mtd_nvmem_add(mtd);
        if (error)
                goto fail_nvmem_add;

        mtd_debugfs_populate(mtd);

        device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
                      "mtd%dro", i);

        pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->add(mtd);

        mutex_unlock(&mtd_table_mutex);

        if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
                if (IS_BUILTIN(CONFIG_MTD)) {
                        pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
                        ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
                } else {
                        pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
                                mtd->index, mtd->name);
                }
        }

        /* We _know_ we aren't being removed, because
           our caller is still holding us here. So none
           of this try_ nonsense, and no bitching about it
           either. :) */
        __module_get(THIS_MODULE);
        return 0;

fail_nvmem_add:
        device_unregister(&mtd->dev);
fail_added:
        of_node_put(mtd_get_of_node(mtd));
        idr_remove(&mtd_idr, i);
fail_locked:
        mutex_unlock(&mtd_table_mutex);
        return error;
}
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or 1 on failure, which currently will happen
 * if the requested device does not appear to be present in the list.
 */
int del_mtd_device(struct mtd_info *mtd)
{
        int ret;
        struct mtd_notifier *not;

        mutex_lock(&mtd_table_mutex);

        if (idr_find(&mtd_idr, mtd->index) != mtd) {
                ret = -ENODEV;
                goto out_error;
        }

        /* No need to get a refcount on the module containing
           the notifier, since we hold the mtd_table_mutex */
        list_for_each_entry(not, &mtd_notifiers, list)
                not->remove(mtd);

        kref_put(&mtd->refcnt, mtd_device_release);
        ret = 0;

out_error:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
        if (mtd->dev.parent) {
                if (!mtd->owner && mtd->dev.parent->driver)
                        mtd->owner = mtd->dev.parent->driver->owner;
                if (!mtd->name)
                        mtd->name = dev_name(mtd->dev.parent);
        } else {
                pr_debug("mtd device won't show a device symlink in sysfs\n");
        }

        INIT_LIST_HEAD(&mtd->partitions);
        mutex_init(&mtd->master.partitions_lock);
        mutex_init(&mtd->master.chrdev_lock);
}
static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
{
        struct otp_info *info;
        ssize_t size = 0;
        unsigned int i;
        size_t retlen;
        int ret;

        info = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        if (is_user)
                ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
        else
                ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
        if (ret)
                goto err;

        for (i = 0; i < retlen / sizeof(*info); i++)
                size += info[i].length;

        kfree(info);
        return size;

err:
        kfree(info);

        /* ENODATA means there is no OTP region. */
        return ret == -ENODATA ? 0 : ret;
}

static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
                                                   const char *compatible,
                                                   int size,
                                                   nvmem_reg_read_t reg_read)
{
        struct nvmem_device *nvmem = NULL;
        struct nvmem_config config = {};
        struct device_node *np;

        /* DT binding is optional */
        np = of_get_compatible_child(mtd->dev.of_node, compatible);

        /* OTP nvmem will be registered on the physical device */
        config.dev = mtd->dev.parent;
        config.name = compatible;
        config.id = NVMEM_DEVID_AUTO;
        config.owner = THIS_MODULE;
        config.add_legacy_fixed_of_cells = true;
        config.type = NVMEM_TYPE_OTP;
        config.root_only = true;
        config.ignore_wp = true;
        config.reg_read = reg_read;
        config.size = size;
        config.of_node = np;
        config.priv = mtd;

        nvmem = nvmem_register(&config);
        /* Just ignore if there is no NVMEM support in the kernel */
        if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
                nvmem = NULL;

        of_node_put(np);

        return nvmem;
}
static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
                                       void *val, size_t bytes)
{
        struct mtd_info *mtd = priv;
        size_t retlen;
        int ret;

        ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
        if (ret)
                return ret;

        return retlen == bytes ? 0 : -EIO;
}

static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
                                       void *val, size_t bytes)
{
        struct mtd_info *mtd = priv;
        size_t retlen;
        int ret;

        ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
        if (ret)
                return ret;

        return retlen == bytes ? 0 : -EIO;
}

static int mtd_otp_nvmem_add(struct mtd_info *mtd)
{
        struct device *dev = mtd->dev.parent;
        struct nvmem_device *nvmem;
        ssize_t size;
        int err;

        if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
                size = mtd_otp_size(mtd, true);
                if (size < 0)
                        return size;

                if (size > 0) {
                        nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
                                                       mtd_nvmem_user_otp_reg_read);
                        if (IS_ERR(nvmem)) {
                                err = PTR_ERR(nvmem);
                                goto err;
                        }
                        mtd->otp_user_nvmem = nvmem;
                }
        }

        if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
                size = mtd_otp_size(mtd, false);
                if (size < 0) {
                        err = size;
                        goto err;
                }

                if (size > 0) {
                        /*
                         * The factory OTP contains things such as a unique serial
                         * number and is small, so let's read it out and put it
                         * into the entropy pool.
                         */
                        void *otp;

                        otp = kmalloc(size, GFP_KERNEL);
                        if (!otp) {
                                err = -ENOMEM;
                                goto err;
                        }
                        err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
                        if (err < 0) {
                                kfree(otp);
                                goto err;
                        }
                        add_device_randomness(otp, err);
                        kfree(otp);

                        nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
                                                       mtd_nvmem_fact_otp_reg_read);
                        if (IS_ERR(nvmem)) {
                                err = PTR_ERR(nvmem);
                                goto err;
                        }
                        mtd->otp_factory_nvmem = nvmem;
                }
        }

        return 0;

err:
        nvmem_unregister(mtd->otp_user_nvmem);
        return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
}
/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
 *   registered first.
 * * Then it tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd as a whole.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
                              struct mtd_part_parser_data *parser_data,
                              const struct mtd_partition *parts,
                              int nr_parts)
{
        int ret;

        mtd_set_dev_defaults(mtd);

        ret = mtd_otp_nvmem_add(mtd);
        if (ret)
                goto out;

        if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
                ret = add_mtd_device(mtd);
                if (ret)
                        goto out;
        }

        /* Prefer parsed partitions over driver-provided fallback */
        ret = parse_mtd_partitions(mtd, types, parser_data);
        if (ret == -EPROBE_DEFER)
                goto out;

        if (ret > 0)
                ret = 0;
        else if (nr_parts)
                ret = add_mtd_partitions(mtd, parts, nr_parts);
        else if (!device_is_registered(&mtd->dev))
                ret = add_mtd_device(mtd);
        else
                ret = 0;

        if (ret)
                goto out;

        /*
         * FIXME: some drivers unfortunately call this function more than once.
         * So we have to check if we've already assigned the reboot notifier.
         *
         * Generally, we can make multiple calls work for most cases, but it
         * does cause problems with parse_mtd_partitions() above (e.g.,
         * cmdlineparts will register partitions more than once).
         */
        WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
                  "MTD already registered\n");
        if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
                mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
                register_reboot_notifier(&mtd->reboot_notifier);
        }

out:
        if (ret) {
                nvmem_unregister(mtd->otp_user_nvmem);
                nvmem_unregister(mtd->otp_factory_nvmem);
        }

        if (ret && device_is_registered(&mtd->dev))
                del_mtd_device(mtd);

        return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
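
/*
 * Illustrative driver-side sketch: the typical way a flash driver hands a
 * freshly-initialised mtd_info to the core. The my_flash_* names, offsets
 * and the fallback partition table are hypothetical.
 */
static const struct mtd_partition my_flash_fallback_parts[] __maybe_unused = {
        { .name = "boot", .offset = 0,       .size = 0x40000 },
        { .name = "data", .offset = 0x40000, .size = MTDPART_SIZ_FULL },
};

static int __maybe_unused my_flash_register(struct mtd_info *mtd)
{
        /* @types == NULL selects the default parser list (see above) */
        return mtd_device_parse_register(mtd, NULL, NULL,
                                         my_flash_fallback_parts,
                                         ARRAY_SIZE(my_flash_fallback_parts));
}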
/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
        int err;

        if (master->_reboot) {
                unregister_reboot_notifier(&master->reboot_notifier);
                memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
        }

        nvmem_unregister(master->otp_user_nvmem);
        nvmem_unregister(master->otp_factory_nvmem);

        err = del_mtd_partitions(master);
        if (err)
                return err;

        if (!device_is_registered(&master->dev))
                return 0;

        return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        list_add(&new->list, &mtd_notifiers);

        __module_get(THIS_MODULE);

        mtd_for_each_device(mtd)
                new->add(mtd);

        mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
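
/*
 * Illustrative sketch of an MTD 'user': a minimal notifier pair. The
 * my_mtd_* names are hypothetical; registering the notifier replays
 * ->add() for every device already present, as described above.
 */
static void my_mtd_add(struct mtd_info *mtd)
{
        pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
}

static void my_mtd_remove(struct mtd_info *mtd)
{
        pr_info("mtd%d (%s) is going away\n", mtd->index, mtd->name);
}

static struct mtd_notifier my_mtd_notifier __maybe_unused = {
        .add = my_mtd_add,
        .remove = my_mtd_remove,
};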
/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
        struct mtd_info *mtd;

        mutex_lock(&mtd_table_mutex);

        module_put(THIS_MODULE);

        mtd_for_each_device(mtd)
                old->remove(mtd);

        list_del(&old->list);
        mutex_unlock(&mtd_table_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
        struct mtd_info *ret = NULL, *other;
        int err = -ENODEV;

        mutex_lock(&mtd_table_mutex);

        if (num == -1) {
                mtd_for_each_device(other) {
                        if (other == mtd) {
                                ret = other;
                                break;
                        }
                }
        } else if (num >= 0) {
                ret = idr_find(&mtd_idr, num);
                if (mtd && mtd != ret)
                        ret = NULL;
        }

        if (!ret) {
                ret = ERR_PTR(err);
                goto out;
        }

        err = __get_mtd_device(ret);
        if (err)
                ret = ERR_PTR(err);
out:
        mutex_unlock(&mtd_table_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);
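
/*
 * Illustrative sketch: look up mtd0 by number, use it, then drop the
 * reference. Hypothetical helper name; error handling beyond the lookup
 * is elided.
 */
static int __maybe_unused my_use_mtd0(void)
{
        struct mtd_info *mtd = get_mtd_device(NULL, 0);

        if (IS_ERR(mtd))
                return PTR_ERR(mtd);

        pr_info("mtd0 is \"%s\", %llu bytes\n", mtd->name,
                (unsigned long long)mtd->size);
        put_mtd_device(mtd);
        return 0;
}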
int __get_mtd_device(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int err;

        if (master->_get_device) {
                err = master->_get_device(mtd);
                if (err)
                        return err;
        }

        if (!try_module_get(master->owner)) {
                if (master->_put_device)
                        master->_put_device(master);
                return -ENODEV;
        }

        while (mtd) {
                if (mtd != master)
                        kref_get(&mtd->refcnt);
                mtd = mtd->parent;
        }

        if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
                kref_get(&master->refcnt);

        return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
 *
 * @np: device tree node
 */
struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
{
        struct mtd_info *mtd = NULL;
        struct mtd_info *tmp;
        int err;

        mutex_lock(&mtd_table_mutex);

        err = -EPROBE_DEFER;
        mtd_for_each_device(tmp) {
                if (mtd_get_of_node(tmp) == np) {
                        mtd = tmp;
                        err = __get_mtd_device(mtd);
                        break;
                }
        }

        mutex_unlock(&mtd_table_mutex);

        return err ? ERR_PTR(err) : mtd;
}
EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
        int err = -ENODEV;
        struct mtd_info *mtd = NULL, *other;

        mutex_lock(&mtd_table_mutex);

        mtd_for_each_device(other) {
                if (!strcmp(name, other->name)) {
                        mtd = other;
                        break;
                }
        }

        if (!mtd)
                goto out_unlock;

        err = __get_mtd_device(mtd);
        if (err)
                goto out_unlock;

        mutex_unlock(&mtd_table_mutex);
        return mtd;

out_unlock:
        mutex_unlock(&mtd_table_mutex);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);

void put_mtd_device(struct mtd_info *mtd)
{
        mutex_lock(&mtd_table_mutex);
        __put_mtd_device(mtd);
        mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
        struct mtd_info *master = mtd_get_master(mtd);

        while (mtd != master) {
                /* kref_put() can release mtd, so keep a reference to mtd->parent */
                struct mtd_info *parent = mtd->parent;

                kref_put(&mtd->refcnt, mtd_device_release);
                mtd = parent;
        }

        if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
                kref_put(&master->refcnt, mtd_device_release);

        module_put(master->owner);

        /* must be the last as master can be freed in the _put_device */
        if (master->_put_device)
                master->_put_device(master);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
/*
 * Erase is a synchronous operation. Device drivers are expected to return a
 * negative error code if the operation failed and update instr->fail_addr
 * to point to the portion that was not properly erased.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtd_info *master = mtd_get_master(mtd);
        u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
        struct erase_info adjinstr;
        int ret;

        instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
        adjinstr = *instr;

        if (!mtd->erasesize || !master->_erase)
                return -ENOTSUPP;

        if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (!instr->len)
                return 0;

        ledtrig_mtd_activity();

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
                                master->erasesize;
                adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
                                master->erasesize) -
                               adjinstr.addr;
        }

        adjinstr.addr += mst_ofs;

        ret = master->_erase(master, &adjinstr);

        if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
                instr->fail_addr = adjinstr.fail_addr - mst_ofs;
                if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                        instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
                                                         master);
                        instr->fail_addr *= mtd->erasesize;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mtd_erase);
ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
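
/*
 * Illustrative sketch: erase the first eraseblock of a device. Hypothetical
 * helper name; mtd_erase() sleeps, so this must not be called from atomic
 * context.
 */
static int __maybe_unused my_erase_first_block(struct mtd_info *mtd)
{
        struct erase_info ei = {
                .addr = 0,
                .len  = mtd->erasesize,
        };

        return mtd_erase(mtd, &ei);
}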
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
              void **virt, resource_size_t *phys)
{
        struct mtd_info *master = mtd_get_master(mtd);

        *retlen = 0;
        *virt = NULL;
        if (phys)
                *phys = 0;
        if (!master->_point)
                return -EOPNOTSUPP;
        if (from < 0 || from >= mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;

        from = mtd_get_master_ofs(mtd, from);
        return master->_point(master, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_unpoint)
                return -EOPNOTSUPP;
        if (from < 0 || from >= mtd->size || len > mtd->size - from)
                return -EINVAL;
        if (!len)
                return 0;
        return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
                                    unsigned long offset, unsigned long flags)
{
        size_t retlen;
        void *virt;
        int ret;

        ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
        if (ret)
                return ret;
        if (retlen != len) {
                mtd_unpoint(mtd, offset, retlen);
                return -ENOSYS;
        }
        return (unsigned long)virt;
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
                                 const struct mtd_ecc_stats *old_stats)
{
        struct mtd_ecc_stats diff;

        if (master == mtd)
                return;

        diff = master->ecc_stats;
        diff.failed -= old_stats->failed;
        diff.corrected -= old_stats->corrected;

        while (mtd->parent) {
                mtd->ecc_stats.failed += diff.failed;
                mtd->ecc_stats.corrected += diff.corrected;
                mtd = mtd->parent;
        }
}

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
             u_char *buf)
{
        struct mtd_oob_ops ops = {
                .len = len,
                .datbuf = buf,
        };
        int ret;

        ret = mtd_read_oob(mtd, from, &ops);
        *retlen = ops.retlen;

        WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));

        return ret;
}
EXPORT_SYMBOL_GPL(mtd_read);
ALLOW_ERROR_INJECTION(mtd_read, ERRNO);

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
              const u_char *buf)
{
        struct mtd_oob_ops ops = {
                .len = len,
                .datbuf = (u8 *)buf,
        };
        int ret;

        ret = mtd_write_oob(mtd, to, &ops);
        *retlen = ops.retlen;

        return ret;
}
EXPORT_SYMBOL_GPL(mtd_write);
ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
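
/*
 * Illustrative sketch: read one writesize unit from offset 0 and treat
 * -EUCLEAN (corrected bitflips) as success. Hypothetical helper name;
 * @buf must be at least mtd->writesize bytes.
 */
static int __maybe_unused my_read_first_page(struct mtd_info *mtd, u8 *buf)
{
        size_t retlen;
        int ret;

        ret = mtd_read(mtd, 0, mtd->writesize, &retlen, buf);
        if (ret && !mtd_is_bitflip(ret))
                return ret;

        return retlen == mtd->writesize ? 0 : -EIO;
}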
/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                    const u_char *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);

        *retlen = 0;
        if (!master->_panic_write)
                return -EOPNOTSUPP;
        if (to < 0 || to >= mtd->size || len > mtd->size - to)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;
        if (!len)
                return 0;
        if (!master->oops_panic_write)
                master->oops_panic_write = true;

        return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
                                    retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
                             struct mtd_oob_ops *ops)
{
        /*
         * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
         * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
         * this case.
         */
        if (!ops->datbuf)
                ops->len = 0;

        if (!ops->oobbuf)
                ops->ooblen = 0;

        if (offs < 0 || offs + ops->len > mtd->size)
                return -EINVAL;

        if (ops->ooblen) {
                size_t maxooblen;

                if (ops->ooboffs >= mtd_oobavail(mtd, ops))
                        return -EINVAL;

                maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
                                      mtd_div_by_ws(offs, mtd)) *
                             mtd_oobavail(mtd, ops)) - ops->ooboffs;
                if (ops->ooblen > maxooblen)
                        return -EINVAL;
        }

        return 0;
}

static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
                            struct mtd_oob_ops *ops)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ret;

        from = mtd_get_master_ofs(mtd, from);
        if (master->_read_oob)
                ret = master->_read_oob(master, from, ops);
        else
                ret = master->_read(master, from, ops->len, &ops->retlen,
                                    ops->datbuf);

        return ret;
}

static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
                             struct mtd_oob_ops *ops)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ret;

        to = mtd_get_master_ofs(mtd, to);
        if (master->_write_oob)
                ret = master->_write_oob(master, to, ops);
        else
                ret = master->_write(master, to, ops->len, &ops->retlen,
                                     ops->datbuf);

        return ret;
}
static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
                               struct mtd_oob_ops *ops)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ngroups = mtd_pairing_groups(master);
        int npairs = mtd_wunit_per_eb(master) / ngroups;
        struct mtd_oob_ops adjops = *ops;
        unsigned int wunit, oobavail;
        struct mtd_pairing_info info;
        int max_bitflips = 0;
        u32 ebofs, pageofs;
        loff_t base, pos;

        ebofs = mtd_mod_by_eb(start, mtd);
        base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
        info.group = 0;
        info.pair = mtd_div_by_ws(ebofs, mtd);
        pageofs = mtd_mod_by_ws(ebofs, mtd);
        oobavail = mtd_oobavail(mtd, ops);

        while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
                int ret;

                if (info.pair >= npairs) {
                        info.pair = 0;
                        base += master->erasesize;
                }

                wunit = mtd_pairing_info_to_wunit(master, &info);
                pos = mtd_wunit_to_offset(mtd, base, wunit);

                adjops.len = ops->len - ops->retlen;
                if (adjops.len > mtd->writesize - pageofs)
                        adjops.len = mtd->writesize - pageofs;

                adjops.ooblen = ops->ooblen - ops->oobretlen;
                if (adjops.ooblen > oobavail - adjops.ooboffs)
                        adjops.ooblen = oobavail - adjops.ooboffs;

                if (read) {
                        ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
                        if (ret > 0)
                                max_bitflips = max(max_bitflips, ret);
                } else {
                        ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
                }

                if (ret < 0)
                        return ret;

                max_bitflips = max(max_bitflips, ret);
                ops->retlen += adjops.retlen;
                ops->oobretlen += adjops.oobretlen;
                adjops.datbuf += adjops.retlen;
                adjops.oobbuf += adjops.oobretlen;
                adjops.ooboffs = 0;
                pageofs = 0;
                info.pair++;
        }

        return max_bitflips;
}
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
        struct mtd_info *master = mtd_get_master(mtd);
        struct mtd_ecc_stats old_stats = master->ecc_stats;
        int ret_code;

        ops->retlen = ops->oobretlen = 0;

        ret_code = mtd_check_oob_ops(mtd, from, ops);
        if (ret_code)
                return ret_code;

        ledtrig_mtd_activity();

        /* Check the validity of a potential fallback on mtd->_read */
        if (!master->_read_oob && (!master->_read || ops->oobbuf))
                return -EOPNOTSUPP;

        if (ops->stats)
                memset(ops->stats, 0, sizeof(*ops->stats));

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
                ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
        else
                ret_code = mtd_read_oob_std(mtd, from, ops);

        mtd_update_ecc_stats(mtd, master, &old_stats);

        /*
         * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
         * similar to mtd->_read(), returning a non-negative integer
         * representing max bitflips. In other cases, mtd->_read_oob() may
         * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
         */
        if (unlikely(ret_code < 0))
                return ret_code;
        if (mtd->ecc_strength == 0)
                return 0;       /* device lacks ecc */
        if (ops->stats)
                ops->stats->max_bitflips = ret_code;
        return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
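
/*
 * Illustrative sketch: read the in-band data and the available OOB bytes of
 * one page in a single call. Hypothetical helper; @data and @oob are
 * caller-provided buffers.
 */
static int __maybe_unused my_read_page_and_oob(struct mtd_info *mtd, loff_t ofs,
                                               u8 *data, u8 *oob, size_t ooblen)
{
        struct mtd_oob_ops ops = {
                .mode = MTD_OPS_AUTO_OOB,
                .len = mtd->writesize,
                .datbuf = data,
                .ooblen = ooblen,
                .oobbuf = oob,
        };

        return mtd_read_oob(mtd, ofs, &ops);
}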
int mtd_write_oob(struct mtd_info *mtd, loff_t to,
                  struct mtd_oob_ops *ops)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ret;

        ops->retlen = ops->oobretlen = 0;

        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        ret = mtd_check_oob_ops(mtd, to, ops);
        if (ret)
                return ret;

        ledtrig_mtd_activity();

        /* Check the validity of a potential fallback on mtd->_write */
        if (!master->_write_oob && (!master->_write || ops->oobbuf))
                return -EOPNOTSUPP;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
                return mtd_io_emulated_slc(mtd, to, false, ops);

        return mtd_write_oob_std(mtd, to, ops);
}
EXPORT_SYMBOL_GPL(mtd_write_oob);

/**
 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
 * @mtd: MTD device structure
 * @section: ECC section. Depending on the layout you may have all the ECC
 *	     bytes stored in a single contiguous section, or one section
 *	     per ECC chunk (and sometimes several sections for a single ECC
 *	     ECC chunk)
 * @oobecc: OOB region struct filled with the appropriate ECC position
 *	    information
 *
 * This function returns ECC section information in the OOB area. If you want
 * to get all the ECC bytes information, then you should call
 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
                      struct mtd_oob_region *oobecc)
{
        struct mtd_info *master = mtd_get_master(mtd);

        memset(oobecc, 0, sizeof(*oobecc));

        if (!master || section < 0)
                return -EINVAL;

        if (!master->ooblayout || !master->ooblayout->ecc)
                return -ENOTSUPP;

        return master->ooblayout->ecc(master, section, oobecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
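
/*
 * Illustrative sketch: walk every ECC section of a layout, as the kerneldoc
 * above suggests, stopping when -ERANGE is returned. Hypothetical helper
 * name and pr_info() output.
 */
static void __maybe_unused my_dump_ecc_layout(struct mtd_info *mtd)
{
        struct mtd_oob_region region;
        int section = 0;

        while (!mtd_ooblayout_ecc(mtd, section++, &region))
                pr_info("ECC section %d: offset %u, length %u\n",
                        section - 1, region.offset, region.length);
}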
/**
 * mtd_ooblayout_free - Get the OOB region definition of a specific free
 *			section
 * @mtd: MTD device structure
 * @section: Free section you are interested in. Depending on the layout
 *	     you may have all the free bytes stored in a single contiguous
 *	     section, or one section per ECC chunk plus an extra section
 *	     for the remaining bytes (or other funky layout).
 * @oobfree: OOB region struct filled with the appropriate free position
 *	     information
 *
 * This function returns free bytes position in the OOB area. If you want
 * to get all the free bytes information, then you should call
 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
                       struct mtd_oob_region *oobfree)
{
        struct mtd_info *master = mtd_get_master(mtd);

        memset(oobfree, 0, sizeof(*oobfree));

        if (!master || section < 0)
                return -EINVAL;

        if (!master->ooblayout || !master->ooblayout->free)
                return -ENOTSUPP;

        return master->ooblayout->free(master, section, oobfree);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_free);

/**
 * mtd_ooblayout_find_region - Find the region attached to a specific byte
 * @mtd: mtd info structure
 * @byte: the byte we are searching for
 * @sectionp: pointer where the section id will be stored
 * @oobregion: used to retrieve the ECC position
 * @iter: iterator function. Should be either mtd_ooblayout_free or
 *	  mtd_ooblayout_ecc depending on the region type you're searching for
 *
 * This function returns the section id and oobregion information of a
 * specific byte. For example, say you want to know where the 4th ECC byte is
 * stored, you'll use:
 *
 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
                                     int *sectionp, struct mtd_oob_region *oobregion,
                                     int (*iter)(struct mtd_info *,
                                                 int section,
                                                 struct mtd_oob_region *oobregion))
{
        int pos = 0, ret, section = 0;

        memset(oobregion, 0, sizeof(*oobregion));

        while (1) {
                ret = iter(mtd, section, oobregion);
                if (ret)
                        return ret;

                if (pos + oobregion->length > byte)
                        break;

                pos += oobregion->length;
                section++;
        }

        /*
         * Adjust region info to make it start at the beginning at the
         * 'start' ECC byte.
         */
        oobregion->offset += byte - pos;
        oobregion->length -= byte - pos;
        *sectionp = section;

        return 0;
}

/**
 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
 *				  ECC byte
 * @mtd: mtd info structure
 * @eccbyte: the byte we are searching for
 * @section: pointer where the section id will be stored
 * @oobregion: OOB region information
 *
 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
 * byte.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
                                 int *section,
                                 struct mtd_oob_region *oobregion)
{
        return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
                                         mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);

/**
 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
 * @mtd: mtd info structure
 * @buf: destination buffer to store OOB bytes
 * @oobbuf: OOB buffer
 * @start: first byte to retrieve
 * @nbytes: number of bytes to retrieve
 * @iter: section iterator
 *
 * Extract bytes attached to a specific category (ECC or free)
 * from the OOB buffer and copy them into buf.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
                                   const u8 *oobbuf, int start, int nbytes,
                                   int (*iter)(struct mtd_info *,
                                               int section,
                                               struct mtd_oob_region *oobregion))
{
        struct mtd_oob_region oobregion;
        int section, ret;

        ret = mtd_ooblayout_find_region(mtd, start, &section,
                                        &oobregion, iter);

        while (!ret) {
                int cnt;

                cnt = min_t(int, nbytes, oobregion.length);
                memcpy(buf, oobbuf + oobregion.offset, cnt);
                buf += cnt;
                nbytes -= cnt;

                if (!nbytes)
                        break;

                ret = iter(mtd, ++section, &oobregion);
        }

        return ret;
}

/**
 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
 * @mtd: mtd info structure
 * @buf: source buffer to get OOB bytes from
 * @oobbuf: OOB buffer
 * @start: first OOB byte to set
 * @nbytes: number of OOB bytes to set
 * @iter: section iterator
 *
 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
 * is selected by passing the appropriate iterator.
 *
 * Returns zero on success, a negative error code otherwise.
 */
static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
                                   u8 *oobbuf, int start, int nbytes,
                                   int (*iter)(struct mtd_info *,
                                               int section,
                                               struct mtd_oob_region *oobregion))
{
        struct mtd_oob_region oobregion;
        int section, ret;

        ret = mtd_ooblayout_find_region(mtd, start, &section,
                                        &oobregion, iter);

        while (!ret) {
                int cnt;

                cnt = min_t(int, nbytes, oobregion.length);
                memcpy(oobbuf + oobregion.offset, buf, cnt);
                buf += cnt;
                nbytes -= cnt;

                if (!nbytes)
                        break;

                ret = iter(mtd, ++section, &oobregion);
        }

        return ret;
}

/**
 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
 * @mtd: mtd info structure
 * @iter: category iterator
 *
 * Count the number of bytes in a given category.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
                                     int (*iter)(struct mtd_info *,
                                                 int section,
                                                 struct mtd_oob_region *oobregion))
{
        struct mtd_oob_region oobregion;
        int section = 0, ret, nbytes = 0;

        while (1) {
                ret = iter(mtd, section++, &oobregion);
                if (ret) {
                        if (ret == -ERANGE)
                                ret = nbytes;
                        break;
                }

                nbytes += oobregion.length;
        }

        return ret;
}
/**
 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: destination buffer to store ECC bytes
 * @oobbuf: OOB buffer
 * @start: first ECC byte to retrieve
 * @nbytes: number of ECC bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
                               const u8 *oobbuf, int start, int nbytes)
{
        return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
                                       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);

/**
 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
 * @mtd: mtd info structure
 * @eccbuf: source buffer to get ECC bytes from
 * @oobbuf: OOB buffer
 * @start: first ECC byte to set
 * @nbytes: number of ECC bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
                               u8 *oobbuf, int start, int nbytes)
{
        return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
                                       mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);

/**
 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
 * @mtd: mtd info structure
 * @databuf: destination buffer to store data bytes
 * @oobbuf: OOB buffer
 * @start: first free byte to retrieve
 * @nbytes: number of free bytes to retrieve
 *
 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
                                const u8 *oobbuf, int start, int nbytes)
{
        return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
                                       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);

/**
 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
 * @mtd: mtd info structure
 * @databuf: source buffer to get data bytes from
 * @oobbuf: OOB buffer
 * @start: first free byte to set
 * @nbytes: number of free bytes to set
 *
 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
 *
 * Returns zero on success, a negative error code otherwise.
 */
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
                                u8 *oobbuf, int start, int nbytes)
{
        return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
                                       mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);

/**
 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
{
        return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);

/**
 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
 * @mtd: mtd info structure
 *
 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
 *
 * Returns a positive value on success, a negative error code otherwise.
 */
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
{
        return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
}
EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
                           struct otp_info *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_get_fact_prot_info)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_get_fact_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);

        *retlen = 0;
        if (!master->_read_fact_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_read_fact_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
                           struct otp_info *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_get_user_prot_info)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_get_user_prot_info(master, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);

        *retlen = 0;
        if (!master->_read_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_read_user_prot_reg(master, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
                            size_t *retlen, const u_char *buf)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ret;

        *retlen = 0;
        if (!master->_write_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
        if (ret)
                return ret;

        /*
         * If no data could be written at all, we are out of space and
         * must return -ENOSPC.
         */
        return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_lock_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_lock_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_erase_user_prot_reg)
                return -EOPNOTSUPP;
        if (!len)
                return 0;
        return master->_erase_user_prot_reg(master, from, len);
}
EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_lock)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
                len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
        }

        return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_unlock)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
                len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
        }

        return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (!master->_is_locked)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
                return -EINVAL;
        if (!len)
                return 0;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
                len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
        }

        return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (ofs < 0 || ofs >= mtd->size)
                return -EINVAL;
        if (!master->_block_isreserved)
                return 0;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

        return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_info *master = mtd_get_master(mtd);

        if (ofs < 0 || ofs >= mtd->size)
                return -EINVAL;
        if (!master->_block_isbad)
                return 0;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

        return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
        struct mtd_info *master = mtd_get_master(mtd);
        int ret;

        if (!master->_block_markbad)
                return -EOPNOTSUPP;
        if (ofs < 0 || ofs >= mtd->size)
                return -EINVAL;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
                ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;

        ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
        if (ret)
                return ret;

        while (mtd->parent) {
                mtd->ecc_stats.badblocks++;
                mtd = mtd->parent;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
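
/*
 * Illustrative sketch: count bad eraseblocks on a device, the way flash
 * tools commonly do before writing an image. Hypothetical helper name.
 */
static int __maybe_unused my_count_bad_blocks(struct mtd_info *mtd)
{
        loff_t ofs;
        int bad = 0;

        for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
                if (mtd_block_isbad(mtd, ofs) > 0)
                        bad++;

        return bad;
}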
/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
                              unsigned long count, loff_t to, size_t *retlen)
{
        unsigned long i;
        size_t totlen = 0, thislen;
        int ret = 0;

        for (i = 0; i < count; i++) {
                if (!vecs[i].iov_len)
                        continue;

                ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
                                vecs[i].iov_base);
                totlen += thislen;
                if (ret || thislen != vecs[i].iov_len)
                        break;
                to += vecs[i].iov_len;
        }

        *retlen = totlen;
        return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
               unsigned long count, loff_t to, size_t *retlen)
{
        struct mtd_info *master = mtd_get_master(mtd);

        *retlen = 0;
        if (!(mtd->flags & MTD_WRITEABLE))
                return -EROFS;

        if (!master->_writev)
                return default_mtd_writev(mtd, vecs, count, to, retlen);

        return master->_writev(master, vecs, count,
                               mtd_get_master_ofs(mtd, to), retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
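
/*
 * Illustrative sketch: write a header and a payload in one vectored call.
 * Hypothetical helper; both buffers must stay valid until it returns.
 */
static int __maybe_unused my_write_two_parts(struct mtd_info *mtd, loff_t to,
                                             void *hdr, size_t hdr_len,
                                             void *data, size_t data_len)
{
        struct kvec vecs[2] = {
                { .iov_base = hdr,  .iov_len = hdr_len  },
                { .iov_base = data, .iov_len = data_len },
        };
        size_t retlen;

        return mtd_writev(mtd, vecs, 2, to, &retlen);
}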
/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
        gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
        size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
        void *kbuf;

        *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

        while (*size > min_alloc) {
                kbuf = kmalloc(*size, flags);
                if (kbuf)
                        return kbuf;

                *size >>= 1;
                *size = ALIGN(*size, mtd->writesize);
        }

        /*
         * For the last resort allocation allow 'kmalloc()' to do all sorts of
         * things (write-back, dropping caches, etc) by using GFP_KERNEL.
         */
        return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
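
/*
 * Illustrative sketch: ask for a whole-eraseblock buffer but accept whatever
 * smaller, writesize-aligned buffer the allocator can provide. Hypothetical
 * helper; on return, *len holds the size actually allocated.
 */
static void * __maybe_unused my_alloc_io_buffer(struct mtd_info *mtd, size_t *len)
{
        *len = mtd->erasesize;
        return mtd_kmalloc_up_to(mtd, len);
}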
#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
        struct mtd_info *mtd;

        seq_puts(m, "dev:    size   erasesize  name\n");
        mutex_lock(&mtd_table_mutex);
        mtd_for_each_device(mtd) {
                seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
                           mtd->index, (unsigned long long)mtd->size,
                           mtd->erasesize, mtd->name);
        }
        mutex_unlock(&mtd_table_mutex);
        return 0;
}
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static struct backing_dev_info * __init mtd_bdi_init(const char *name)
{
        struct backing_dev_info *bdi;
        int ret;

        bdi = bdi_alloc(NUMA_NO_NODE);
        if (!bdi)
                return ERR_PTR(-ENOMEM);
        bdi->ra_pages = 0;
        bdi->io_pages = 0;

        /*
         * We put '-0' suffix to the name to get the same name format as we
         * used to get. Since this is called only once, we get a unique name.
         */
        ret = bdi_register(bdi, "%.28s-0", name);
        if (ret)
                bdi_put(bdi);

        return ret ? ERR_PTR(ret) : bdi;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
        int ret;

        ret = class_register(&mtd_class);
        if (ret)
                goto err_reg;

        mtd_bdi = mtd_bdi_init("mtd");
        if (IS_ERR(mtd_bdi)) {
                ret = PTR_ERR(mtd_bdi);
                goto err_bdi;
        }

        proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);

        ret = init_mtdchar();
        if (ret)
                goto out_procfs;

        dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
        debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
                            &mtd_expert_analysis_mode);

        return 0;

out_procfs:
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
        bdi_unregister(mtd_bdi);
        bdi_put(mtd_bdi);
err_bdi:
        class_unregister(&mtd_class);
err_reg:
        pr_err("Error registering mtd class or bdi: %d\n", ret);
        return ret;
}

static void __exit cleanup_mtd(void)
{
        debugfs_remove_recursive(dfs_dir_mtd);
        cleanup_mtdchar();
        if (proc_mtd)
                remove_proc_entry("mtd", NULL);
        class_unregister(&mtd_class);
        bdi_unregister(mtd_bdi);
        bdi_put(mtd_bdi);
        idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");