// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 *   Copyright (c) International Business Machines Corp., 2006
 *   Copyright (c) Nokia Corporation, 2007
 *   Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */

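/*
 * Worked example of the mapping above (illustrative numbers only): with a
 * usable LEB size of 126 KiB (129024 bytes), byte address 200000 maps to
 * LEB 200000 / 129024 = 1, at offset 200000 - 129024 = 70976 within that LEB.
 */
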
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

struct ubiblock_pdu {
	struct ubi_sgl usgl;
};

/* Numbers of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* MTD devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

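/*
 * Parse one early 'block=' parameter into the next free ubiblock_param slot.
 * Accepted forms are a volume device node path ("/dev/ubi0_0"), or a UBI
 * device number followed by a volume id or volume name ("0,0" or "0,rootfs").
 */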
static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the parameter array when too many devices are given */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: at most %d 'block=' parameters are supported\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set    = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

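/*
 * Look up an ubiblock device by UBI device and volume numbers. The caller
 * must hold devices_mutex, which protects ubiblock_devices.
 */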
static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

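/*
 * Serve a read request by mapping the requested byte range onto consecutive
 * LEBs and reading each piece through the scatter-gather list stored in the
 * per-request PDU. A single ubi_read_sg() call never crosses a LEB boundary;
 * larger reads are split across iterations of the loop.
 */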
static blk_status_t ubiblock_read(struct request *req)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
	struct ubiblock *dev = req->q->queuedata;
	u64 pos = blk_rq_pos(req) << 9;
	int to_read = blk_rq_bytes(req);
	int bytes_left = to_read;
	/* Get LEB:offset address to read from */
	int offset = do_div(pos, dev->leb_size);
	int leb = pos;
	struct req_iterator iter;
	struct bio_vec bvec;
	int ret;

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	ubi_sgl_init(&pdu->usgl);
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			break;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}

	rq_for_each_segment(bvec, req, iter)
		flush_dcache_page(bvec.bv_page);
	return errno_to_blk_status(ret);
}

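/*
 * Open the backing UBI volume read-only on first open and keep a reference
 * count for subsequent openers; writers are refused with -EROFS.
 */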
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo	= ubiblock_getgeo,
};

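/*
 * blk-mq dispatch entry point: only REQ_OP_READ is handled, everything else
 * (writes, discards, flushes) is failed with BLK_STS_IOERR since the device
 * is strictly read-only.
 */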
static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
				      const struct blk_mq_queue_data *bd)
{
	switch (req_op(bd->rq)) {
	case REQ_OP_READ:
		return ubiblock_read(bd->rq);
	default:
		return BLK_STS_IOERR;
	}
}

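/*
 * Per-request PDU setup: initialize the scatter-gather table once when the
 * request is allocated, so ubiblock_read() only has to map the bios into it.
 */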
static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq       = ubiblock_queue_rq,
	.init_request	= ubiblock_init_request,
};

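/*
 * Convert the volume size in bytes to a capacity in 512-byte sectors. A
 * trailing partial sector is dropped (with a warning for dynamic volumes),
 * and -EFBIG is returned if the result does not fit in a sector_t.
 */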
static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		if (vi->vol_type == UBI_DYNAMIC_VOLUME)
			pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
		else
			pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
				vi->used_bytes - (size << 9));
	}

	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}

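/*
 * Create a read-only gendisk ("ubiblock<ubi>_<vol>") on top of the given UBI
 * volume: allocate the blk-mq tag set and disk, reserve a dynamic minor,
 * link the device into ubiblock_devices and finally register the disk.
 * Returns -EEXIST if the volume is already handled.
 */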
int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		/* dev->gd is not set up yet, so don't use disk_to_dev() here */
		pr_err("UBI: block: blk_mq_alloc_tag_set failed\n");
		goto out_free_dev;
	}

	/* Initialize the gendisk of this ubiblock device */
	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tags;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->minors = 1;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_cleanup_disk;
	}
	gd->flags |= GENHD_FL_NO_PART;
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->rq = gd->queue;
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	ret = device_add_disk(vi->dev, dev->gd, NULL);
	if (ret)
		goto out_remove_minor;

	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_remove_minor:
	list_del(&dev->list);
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
	put_disk(dev->gd);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

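/*
 * Tear down a single ubiblock device: unregister the disk so no new requests
 * can arrive, then release the disk, the tag set and the reserved minor.
 * Callers hold devices_mutex.
 */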
static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	put_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}

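/*
 * Remove the block device backing the given volume. Fails with -ENODEV if no
 * such device exists and with -EBUSY if it is still opened by someone.
 */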
int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

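/*
 * Propagate a change of the volume's used_bytes to the block device by
 * recomputing and updating the disk capacity.
 */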
static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

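/*
 * UBI notifier callback: volume removal and resize events are forwarded to
 * the matching ubiblock device. Newly added volumes are ignored because
 * block devices are only created on explicit request, and updates of static
 * volumes are treated as resizes since used_bytes may have changed.
 */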
static int ubiblock_notify(struct notifier_block *nb,
			   unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

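/*
 * Open a volume descriptor from an early 'block=' parameter, using whichever
 * of device node path, volume name or volume id was supplied.
 */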
static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

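/* Create block devices for all volumes listed in early 'block=' parameters */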
static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

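/*
 * Forcefully remove every remaining ubiblock device; used on module exit and
 * on the init error path.
 */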
static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

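/*
 * Register the 'ubiblock' block major, create devices requested via early
 * 'block=' parameters and subscribe to UBI volume notifications.
 */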
int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}