// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * it later using the "UBI control device".
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @ubi_num: UBI number to assign to the newly created UBI device
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

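/*
 * Illustrative mapping (the device path and numbers below are hypothetical):
 * a parameter such as "mtd=/dev/mtd1,2048,20,3" is parsed by
 * ubi_mtd_param_parse() into one mtd_dev_param entry with name "/dev/mtd1",
 * vid_hdr_offs = 2048, max_beb_per1024 = 20 and ubi_num = 3. Omitted fields
 * keep their defaults (zero, or UBI_DEV_NUM_AUTO for the UBI number).
 */
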
/* Number of elements set in the @mtd_dev_param array */
static int mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static ssize_t version_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}
static CLASS_ATTR_RO(version);

static struct attribute *ubi_class_attrs[] = {
	&class_attr_version.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ubi_class);

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class ubi_class = {
	.name = UBI_NAME_STR,
	.owner = THIS_MODULE,
	.class_groups = ubi_class_groups,
};

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_ro_mode =
	__ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI device
 * number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else if (attr == &dev_ro_mode)
		ret = sprintf(buf, "%d\n", ubi->ro_mode);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static struct attribute *ubi_dev_attrs[] = {
	&dev_eraseblock_size.attr,
	&dev_avail_eraseblocks.attr,
	&dev_total_eraseblocks.attr,
	&dev_volumes_count.attr,
	&dev_max_ec.attr,
	&dev_reserved_for_bad.attr,
	&dev_bad_peb_count.attr,
	&dev_max_vol_count.attr,
	&dev_min_io_size.attr,
	&dev_bgt_enabled.attr,
	&dev_mtd_num.attr,
	&dev_ro_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(ubi_dev);

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated and returns an error.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi)
{
	int i, err;
	dev_t dev;

	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
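	/*
	 * Worked example (the numbers are illustrative): for ubi0 with
	 * ubi->vtbl_slots == 128, one dynamically allocated major M covers
	 * minors 0..128; /dev/ubi0 gets (M, 0) and volume N is exposed with
	 * minor N + 1 (i.e. /dev/ubi0_N).
	 */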
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi->dev.devt = dev;

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num);
	err = cdev_device_add(&ubi->cdev, &ubi->dev);
	if (err)
		goto out_unreg;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	cdev_device_del(&ubi->cdev, &ubi->dev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		ubi_eba_replace_table(ubi->volumes[i], NULL);
		ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024) {
		/*
		 * Since max_beb_per1024 has not been set by the user in either
		 * the cmdline or Kconfig, use mtd_max_bad_blocks to set the
		 * limit if it is supported by the device.
		 */
		limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size);
		if (limit < 0)
			return 0;
		return limit;
	}

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}

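/*
 * Worked example for get_bad_peb_limit() (hypothetical chip geometry): a
 * 256 MiB device with 128 KiB eraseblocks has 2048 PEBs in total, so
 * max_beb_per1024 = 20 yields limit = 2048 * 20 / 1024 = 40 reserved PEBs,
 * regardless of how large the attached MTD partition actually is.
 */
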
/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size %d", ubi->min_io_size);
	dbg_gen("max_write_size %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
					~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
				     ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start %d", ubi->leb_start);

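	/*
	 * Illustrative default layout (hypothetical NAND geometry): with
	 * 2048-byte pages and 512-byte sub-pages, hdrs_min_io_size is 512,
	 * so the EC header occupies the first 512-byte sub-page, the VID
	 * header defaults to vid_hdr_offset 512, and leb_start is rounded up
	 * to 2048, i.e. LEB data starts at the second NAND page.
	 */
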
	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}

/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
 * case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			pr_err("ubi: mtd%d is already attached to ubi%d\n",
			       mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n",
		       mtd->index);
		return -EINVAL;
	}

	/*
	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
	 * will die soon and you will lose all your data.
	 */
	if (mtd->type == MTD_MLCNANDFLASH) {
		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
		       mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			pr_err("ubi: only %d UBI devices may be created\n",
			       UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			pr_err("ubi: ubi%i already exists\n", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	device_initialize(&ubi->dev);
	ubi->dev.release = dev_release;
	ubi->dev.class = &ubi_class;
	ubi->dev.groups = ubi_dev_groups;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
			ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	ubi->fm_pool.max_size = max(ubi->fm_pool.max_size,
			UBI_FM_MIN_POOL_SIZE);

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
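	/*
	 * Illustrative sizing (hypothetical device): 2048 PEBs give a fastmap
	 * pool of 2048 / 100 * 5 = 100 PEBs (integer division), clamped to
	 * the [UBI_FM_MIN_POOL_SIZE, UBI_FM_MAX_POOL_SIZE] range, and the
	 * wear-leveling pool above gets half of that.
	 */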
	ubi->fm_disabled = !fm_autoconvert;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "default fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	init_rwsem(&ubi->fm_protect);
	init_rwsem(&ubi->fm_eba_sem);

	ubi_msg(ubi, "attaching mtd%d", mtd->index);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err(ubi, "failed to attach mtd%d, error %d",
			mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	/* Make device "available" before it becomes accessible via sysfs */
	ubi_devices[ubi_num] = ubi;

	err = uif_init(ubi);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err(ubi, "cannot spawn \"%s\", error %d",
			ubi->bgt_name, err);
		goto out_debugfs;
	}

	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
		mtd->index, mtd->name, ubi->flash_size >> 20);
	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	uif_close(ubi);
out_detach:
	ubi_devices[ubi_num] = NULL;
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	put_device(&ubi->dev);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err(ubi, "%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/* If we don't write a new fastmap at detach time we lose all
	 * EC updates that have been made since the last written fastmap.
	 * In case of fastmap debugging we omit the update to simulate an
	 * unclean shutdown. */
	if (!ubi_dbg_chk_fastmap(ubi))
		ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

#ifdef CONFIG_MTD_UBI_FASTMAP
	cancel_work_sync(&ubi->fm_work);
#endif
	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
	put_mtd_device(ubi->mtd);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character node device path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, minor;
	struct path path;
	struct kstat stat;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
	path_put(&path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
		return ERR_PTR(-EINVAL);

	minor = MINOR(stat.rdev);

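	/*
	 * The mtdchar driver registers two character device minors per MTD
	 * device: an even minor for the read-write /dev/mtdX node and an odd
	 * minor for the read-only /dev/mtdrX node, hence the minor / 2
	 * conversion below.
	 */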
	if (minor & 1)
		/*
		 * We just do not think support for the "/dev/mtdrX" devices
		 * is needed, so we do not support them to avoid doing extra
		 * work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if that fails, it
 * is treated as an MTD device name, and if that also fails, it is treated as
 * an MTD character device node path. Returns MTD device description object in
 * case of success and a negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		pr_err("UBI error: too many MTD devices, maximum is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	err = class_register(&ubi_class);
	if (err < 0)
		return err;

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		pr_err("UBI error: cannot register device\n");
		goto out;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;


	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			pr_err("UBI error: cannot open mtd %s, error %d\n",
			       p->name, err);
			/* See comment below re-ubi_is_module(). */
			if (ubi_is_module())
				goto out_detach;
			continue;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d\n",
			       mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	err = ubiblock_init();
	if (err) {
		pr_err("UBI error: block: cannot initialize, error %d\n", err);

		/* See comment above re-ubi_is_module(). */
		if (ubi_is_module())
			goto out_detach;
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out:
	class_unregister(&ubi_class);
	pr_err("UBI error: cannot initialize UBI, error %d\n", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	ubiblock_exit();

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_unregister(&ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns a positive resulting integer in case of success and a
 * negative error code in case of failure.
 */
static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	return result;
}

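/*
 * Examples for bytes_str_to_int() (hypothetical inputs): "2048" yields 2048,
 * "2K" and "2KiB" both yield 2048, and "1M" yields 1048576; the 'G', 'M' and
 * 'K' cases deliberately fall through so that each suffix multiplies the
 * result by 1024 the appropriate number of times.
 */
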
/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT], *token;

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		pr_err("UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
		       val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		pr_err("UBI error: too many arguments at \"%s\"\n", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	token = tokens[1];
	if (token) {
		p->vid_hdr_offs = bytes_str_to_int(token);

		if (p->vid_hdr_offs < 0)
			return p->vid_hdr_offs;
	}

	token = tokens[2];
	if (token) {
		int err = kstrtoint(token, 10, &p->max_beb_per1024);

		if (err) {
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
			       token);
			return -EINVAL;
		}
	}

	token = tokens[3];
	if (token) {
		int err = kstrtoint(token, 10, &p->ubi_num);

		if (err) {
			pr_err("UBI error: bad value for ubi_num parameter: %s",
			       token);
			return -EINVAL;
		}
	} else
		p->ubi_num = UBI_DEV_NUM_AUTO;

	mtd_devs += 1;
	return 0;
}

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 0400);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value is used if 0)\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default)\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEBs, 100 will be reserved for this UBI device).\n"
		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5, using default values for the other fields.");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");