Merge tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Mar 2023 17:06:51 +0000 (09:06 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Mar 2023 17:06:51 +0000 (09:06 -0800)
Pull jffs2, ubi and ubifs updates from Richard Weinberger:
 "JFFS2:
   - Fix memory corruption in error path
   - Spelling and coding style fixes

  UBI:
   - Switch to BLK_MQ_F_BLOCKING in ubiblock
   - Wire up parent device (for sysfs)
   - Multiple UAF bugfixes
   - Fix for an infinite loop in WL error path

  UBIFS:
   - Fix for multiple memory leaks in error paths
   - Fixes for wrong space accounting
   - Minor cleanups
   - Spelling and coding style fixes"

* tag 'ubifs-for-linus-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs: (36 commits)
  ubi: block: Fix a possible use-after-free bug in ubiblock_create()
  ubifs: make kobj_type structures constant
  mtd: ubi: block: wire-up device parent
  mtd: ubi: wire-up parent MTD device
  ubi: use correct names in function kernel-doc comments
  ubi: block: set BLK_MQ_F_BLOCKING
  jffs2: Fix list_del corruption if compressors initialized failed
  jffs2: Use function instead of macro when initialize compressors
  jffs2: fix spelling mistake "neccecary"->"necessary"
  ubifs: Fix kernel-doc
  ubifs: Fix some kernel-doc comments
  UBI: Fastmap: Fix kernel-doc
  ubi: ubi_wl_put_peb: Fix infinite loop when wear-leveling work failed
  ubi: Fix UAF wear-leveling entry in eraseblk_count_seq_show()
  ubi: fastmap: Fix missed fm_anchor PEB in wear-leveling after disabling fastmap
  ubifs: ubifs_releasepage: Remove ubifs_assert(0) to valid this process
  ubifs: ubifs_writepage: Mark page dirty after writing inode failed
  ubifs: dirty_cow_znode: Fix memleak in error handling path
  ubifs: Re-statistic cleaned znode count if commit failed
  ubi: Fix permission display of the debugfs files
  ...

24 files changed:
drivers/mtd/ubi/block.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/debug.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/fastmap-wl.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/kapi.c
drivers/mtd/ubi/misc.c
drivers/mtd/ubi/vmt.c
drivers/mtd/ubi/wl.c
fs/jffs2/compr.c
fs/jffs2/compr.h
fs/jffs2/file.c
fs/jffs2/fs.c
fs/ubifs/budget.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/io.c
fs/ubifs/journal.c
fs/ubifs/super.c
fs/ubifs/sysfs.c
fs/ubifs/tnc.c
fs/ubifs/ubifs.h
include/linux/mtd/ubi.h

index 75eaecc8639f00b32ad925167a41410792ad3a89..1de87062c67b9b54ca6664abeadefaf4971dbafb 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/mtd/ubi.h>
-#include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
 #include <linux/hdreg.h>
@@ -62,7 +61,6 @@ struct ubiblock_param {
 };
 
 struct ubiblock_pdu {
-       struct work_struct work;
        struct ubi_sgl usgl;
 };
 
@@ -82,8 +80,6 @@ struct ubiblock {
        struct gendisk *gd;
        struct request_queue *rq;
 
-       struct workqueue_struct *wq;
-
        struct mutex dev_mutex;
        struct list_head list;
        struct blk_mq_tag_set tag_set;
@@ -181,20 +177,29 @@ static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
        return NULL;
 }
 
-static int ubiblock_read(struct ubiblock_pdu *pdu)
+static blk_status_t ubiblock_read(struct request *req)
 {
-       int ret, leb, offset, bytes_left, to_read;
-       u64 pos;
-       struct request *req = blk_mq_rq_from_pdu(pdu);
+       struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
        struct ubiblock *dev = req->q->queuedata;
+       u64 pos = blk_rq_pos(req) << 9;
+       int to_read = blk_rq_bytes(req);
+       int bytes_left = to_read;
+       /* Get LEB:offset address to read from */
+       int offset = do_div(pos, dev->leb_size);
+       int leb = pos;
+       struct req_iterator iter;
+       struct bio_vec bvec;
+       int ret;
 
-       to_read = blk_rq_bytes(req);
-       pos = blk_rq_pos(req) << 9;
+       blk_mq_start_request(req);
 
-       /* Get LEB:offset address to read from */
-       offset = do_div(pos, dev->leb_size);
-       leb = pos;
-       bytes_left = to_read;
+       /*
+        * It is safe to ignore the return value of blk_rq_map_sg() because
+        * the number of sg entries is limited to UBI_MAX_SG_COUNT
+        * and ubi_read_sg() will check that limit.
+        */
+       ubi_sgl_init(&pdu->usgl);
+       blk_rq_map_sg(req->q, req, pdu->usgl.sg);
 
        while (bytes_left) {
                /*
@@ -206,14 +211,17 @@ static int ubiblock_read(struct ubiblock_pdu *pdu)
 
                ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
                if (ret < 0)
-                       return ret;
+                       break;
 
                bytes_left -= to_read;
                to_read = bytes_left;
                leb += 1;
                offset = 0;
        }
-       return 0;
+
+       rq_for_each_segment(bvec, req, iter)
+               flush_dcache_page(bvec.bv_page);
+       return errno_to_blk_status(ret);
 }
 
 static int ubiblock_open(struct block_device *bdev, fmode_t mode)
@@ -289,47 +297,15 @@ static const struct block_device_operations ubiblock_ops = {
        .getgeo = ubiblock_getgeo,
 };
 
-static void ubiblock_do_work(struct work_struct *work)
-{
-       int ret;
-       struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
-       struct request *req = blk_mq_rq_from_pdu(pdu);
-       struct req_iterator iter;
-       struct bio_vec bvec;
-
-       blk_mq_start_request(req);
-
-       /*
-        * It is safe to ignore the return value of blk_rq_map_sg() because
-        * the number of sg entries is limited to UBI_MAX_SG_COUNT
-        * and ubi_read_sg() will check that limit.
-        */
-       blk_rq_map_sg(req->q, req, pdu->usgl.sg);
-
-       ret = ubiblock_read(pdu);
-
-       rq_for_each_segment(bvec, req, iter)
-               flush_dcache_page(bvec.bv_page);
-
-       blk_mq_end_request(req, errno_to_blk_status(ret));
-}
-
 static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                             const struct blk_mq_queue_data *bd)
 {
-       struct request *req = bd->rq;
-       struct ubiblock *dev = hctx->queue->queuedata;
-       struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
-
-       switch (req_op(req)) {
+       switch (req_op(bd->rq)) {
        case REQ_OP_READ:
-               ubi_sgl_init(&pdu->usgl);
-               queue_work(dev->wq, &pdu->work);
-               return BLK_STS_OK;
+               return ubiblock_read(bd->rq);
        default:
                return BLK_STS_IOERR;
        }
-
 }
 
 static int ubiblock_init_request(struct blk_mq_tag_set *set,
@@ -339,8 +315,6 @@ static int ubiblock_init_request(struct blk_mq_tag_set *set,
        struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);
 
        sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
-       INIT_WORK(&pdu->work, ubiblock_do_work);
-
        return 0;
 }
 
@@ -354,9 +328,12 @@ static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
        u64 size = vi->used_bytes >> 9;
 
        if (vi->used_bytes % 512) {
-               pr_warn("UBI: block: volume size is not a multiple of 512, "
-                       "last %llu bytes are ignored!\n",
-                       vi->used_bytes - (size << 9));
+               if (vi->vol_type == UBI_DYNAMIC_VOLUME)
+                       pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+                               vi->used_bytes - (size << 9));
+               else
+                       pr_info("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
+                               vi->used_bytes - (size << 9));
        }
 
        if ((sector_t)size != size)
@@ -401,7 +378,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
        dev->tag_set.ops = &ubiblock_mq_ops;
        dev->tag_set.queue_depth = 64;
        dev->tag_set.numa_node = NUMA_NO_NODE;
-       dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+       dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
        dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
        dev->tag_set.driver_data = dev;
        dev->tag_set.nr_hw_queues = 1;
@@ -439,32 +416,20 @@ int ubiblock_create(struct ubi_volume_info *vi)
        dev->rq = gd->queue;
        blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);
 
-       /*
-        * Create one workqueue per volume (per registered block device).
-        * Remember workqueues are cheap, they're not threads.
-        */
-       dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
-       if (!dev->wq) {
-               ret = -ENOMEM;
-               goto out_remove_minor;
-       }
-
        list_add_tail(&dev->list, &ubiblock_devices);
 
        /* Must be the last step: anyone can call file ops from now on */
-       ret = add_disk(dev->gd);
+       ret = device_add_disk(vi->dev, dev->gd, NULL);
        if (ret)
-               goto out_destroy_wq;
+               goto out_remove_minor;
 
        dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
                 dev->ubi_num, dev->vol_id, vi->name);
        mutex_unlock(&devices_mutex);
        return 0;
 
-out_destroy_wq:
-       list_del(&dev->list);
-       destroy_workqueue(dev->wq);
 out_remove_minor:
+       list_del(&dev->list);
        idr_remove(&ubiblock_minor_idr, gd->first_minor);
 out_cleanup_disk:
        put_disk(dev->gd);
@@ -482,8 +447,6 @@ static void ubiblock_cleanup(struct ubiblock *dev)
 {
        /* Stop new requests to arrive */
        del_gendisk(dev->gd);
-       /* Flush pending work */
-       destroy_workqueue(dev->wq);
        /* Finally destroy the blk queue */
        dev_info(disk_to_dev(dev->gd), "released");
        put_disk(dev->gd);
index a901f8edfa41d4c1228df067eee00af1fe3f3fbc..0904eb40c95fa133c1cdc4454beb6d4d3d90fc6a 100644 (file)
@@ -35,7 +35,7 @@
 #define MTD_PARAM_LEN_MAX 64
 
 /* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 4
+#define MTD_PARAM_MAX_COUNT 5
 
 /* Maximum value for the number of bad PEBs per 1024 PEBs */
 #define MAX_MTD_UBI_BEB_LIMIT 768
  * @ubi_num: UBI number
  * @vid_hdr_offs: VID header offset
  * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
+ * @enable_fm: enable fastmap when value is non-zero
  */
 struct mtd_dev_param {
        char name[MTD_PARAM_LEN_MAX];
        int ubi_num;
        int vid_hdr_offs;
        int max_beb_per1024;
+       int enable_fm;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
@@ -468,6 +470,7 @@ static int uif_init(struct ubi_device *ubi)
                        err = ubi_add_volume(ubi, ubi->volumes[i]);
                        if (err) {
                                ubi_err(ubi, "cannot add volume %d", i);
+                               ubi->volumes[i] = NULL;
                                goto out_volumes;
                        }
                }
@@ -663,6 +666,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
        ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
        ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
 
+       if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) >
+           ubi->vid_hdr_alsize)) {
+               ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset);
+               return -EINVAL;
+       }
+
        dbg_gen("min_io_size      %d", ubi->min_io_size);
        dbg_gen("max_write_size   %d", ubi->max_write_size);
        dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
@@ -906,6 +915,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
        ubi->dev.release = dev_release;
        ubi->dev.class = &ubi_class;
        ubi->dev.groups = ubi_dev_groups;
+       ubi->dev.parent = &mtd->dev;
 
        ubi->mtd = mtd;
        ubi->ubi_num = ubi_num;
@@ -1248,7 +1258,7 @@ static int __init ubi_init(void)
                mutex_lock(&ubi_devices_mutex);
                err = ubi_attach_mtd_dev(mtd, p->ubi_num,
                                         p->vid_hdr_offs, p->max_beb_per1024,
-                                        false);
+                                        p->enable_fm == 0 ? true : false);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0) {
                        pr_err("UBI error: cannot attach mtd%d\n",
@@ -1427,7 +1437,7 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
                int err = kstrtoint(token, 10, &p->max_beb_per1024);
 
                if (err) {
-                       pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
+                       pr_err("UBI error: bad value for max_beb_per1024 parameter: %s\n",
                               token);
                        return -EINVAL;
                }
@@ -1438,13 +1448,25 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
                int err = kstrtoint(token, 10, &p->ubi_num);
 
                if (err) {
-                       pr_err("UBI error: bad value for ubi_num parameter: %s",
+                       pr_err("UBI error: bad value for ubi_num parameter: %s\n",
                               token);
                        return -EINVAL;
                }
        } else
                p->ubi_num = UBI_DEV_NUM_AUTO;
 
+       token = tokens[4];
+       if (token) {
+               int err = kstrtoint(token, 10, &p->enable_fm);
+
+               if (err) {
+                       pr_err("UBI error: bad value for enable_fm parameter: %s\n",
+                               token);
+                       return -EINVAL;
+               }
+       } else
+               p->enable_fm = 0;
+
        mtd_devs += 1;
        return 0;
 }
@@ -1457,11 +1479,13 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
                      "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
                      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
                      "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
+                     "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
                      "\n"
                      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
                      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
                      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
                      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
+                     "example 5: mtd=1,0,0,5 mtd=2,0,0,6,1 - attach MTD device /dev/mtd1 to UBI 5 and disable fastmap; attach MTD device /dev/mtd2 to UBI 6 and enable fastmap.(only works when fastmap is enabled and fm_autoconvert=Y).\n"
                      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
 #ifdef CONFIG_MTD_UBI_FASTMAP
 module_param(fm_autoconvert, bool, 0644);
index fcca6942dbdd0d7324c579c1d11e666ba1a06e26..27168f511d6d4602d857cad9f25b3b97417d2ba3 100644 (file)
@@ -504,6 +504,7 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 {
        unsigned long ubi_num = ubi->ubi_num;
        struct ubi_debug_info *d = &ubi->dbg;
+       umode_t mode = S_IRUSR | S_IWUSR;
        int n;
 
        if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -518,41 +519,41 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
 
        d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
 
-       d->dfs_chk_gen = debugfs_create_file("chk_gen", S_IWUSR, d->dfs_dir,
+       d->dfs_chk_gen = debugfs_create_file("chk_gen", mode, d->dfs_dir,
                                             (void *)ubi_num, &dfs_fops);
 
-       d->dfs_chk_io = debugfs_create_file("chk_io", S_IWUSR, d->dfs_dir,
+       d->dfs_chk_io = debugfs_create_file("chk_io", mode, d->dfs_dir,
                                            (void *)ubi_num, &dfs_fops);
 
-       d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", S_IWUSR,
+       d->dfs_chk_fastmap = debugfs_create_file("chk_fastmap", mode,
                                                 d->dfs_dir, (void *)ubi_num,
                                                 &dfs_fops);
 
-       d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", S_IWUSR,
+       d->dfs_disable_bgt = debugfs_create_file("tst_disable_bgt", mode,
                                                 d->dfs_dir, (void *)ubi_num,
                                                 &dfs_fops);
 
        d->dfs_emulate_bitflips = debugfs_create_file("tst_emulate_bitflips",
-                                                     S_IWUSR, d->dfs_dir,
+                                                     mode, d->dfs_dir,
                                                      (void *)ubi_num,
                                                      &dfs_fops);
 
        d->dfs_emulate_io_failures = debugfs_create_file("tst_emulate_io_failures",
-                                                        S_IWUSR, d->dfs_dir,
+                                                        mode, d->dfs_dir,
                                                         (void *)ubi_num,
                                                         &dfs_fops);
 
        d->dfs_emulate_power_cut = debugfs_create_file("tst_emulate_power_cut",
-                                                      S_IWUSR, d->dfs_dir,
+                                                      mode, d->dfs_dir,
                                                       (void *)ubi_num,
                                                       &dfs_fops);
 
        d->dfs_power_cut_min = debugfs_create_file("tst_emulate_power_cut_min",
-                                                  S_IWUSR, d->dfs_dir,
+                                                  mode, d->dfs_dir,
                                                   (void *)ubi_num, &dfs_fops);
 
        d->dfs_power_cut_max = debugfs_create_file("tst_emulate_power_cut_max",
-                                                  S_IWUSR, d->dfs_dir,
+                                                  mode, d->dfs_dir,
                                                   (void *)ubi_num, &dfs_fops);
 
        debugfs_create_file("detailed_erase_block_info", S_IRUSR, d->dfs_dir,
index 09c408c45a62186a67cc1efe4bfcd66b258dc49d..403b79d6efd5ad56868e35f7e10e5903ec7fb7b5 100644 (file)
@@ -61,7 +61,7 @@ struct ubi_eba_table {
 };
 
 /**
- * next_sqnum - get next sequence number.
+ * ubi_next_sqnum - get next sequence number.
  * @ubi: UBI device description object
  *
  * This function returns next sequence number to use, which is just the current
index 0ee452275578d9e5b968a41b41e9cbbc66be794d..863f571f1adb545a4991c0d22efc9889968d22f3 100644 (file)
@@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi)
        if (ubi->fm_anchor) {
                wl_tree_add(ubi->fm_anchor, &ubi->free);
                ubi->free_count++;
+               ubi->fm_anchor = NULL;
        }
 
-       /*
-        * All available PEBs are in ubi->free, now is the time to get
-        * the best anchor PEBs.
-        */
-       ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+       if (!ubi->fm_disabled)
+               /*
+                * All available PEBs are in ubi->free, now is the time to get
+                * the best anchor PEBs.
+                */
+               ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
 
        for (;;) {
                enough = 0;
index ca2d9efe62c3c75063b41ecacb6cbbd6f107146d..28c8151a0725d5b5c9abbc31bdab3e48fad3253b 100644 (file)
@@ -93,7 +93,7 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
 
 
 /**
- * new_fm_vhdr - allocate a new volume header for fastmap usage.
+ * new_fm_vbuf() - allocate a new volume header for fastmap usage.
  * @ubi: UBI device description object
  * @vol_id: the VID of the new header
  *
index 0fce99ff29b583639723676b24341231f731952a..5db653eacbd451ba6efc08e4ce96c192159fef44 100644 (file)
@@ -79,6 +79,7 @@ void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
        vi->name_len = vol->name_len;
        vi->name = vol->name;
        vi->cdev = vol->cdev.dev;
+       vi->dev = &vol->dev;
 }
 
 /**
index 7b30c8ee3e82d7bfb5005857175736f485b373b4..1794d66b6eb7232969cd955cbb7927764554563f 100644 (file)
@@ -10,7 +10,7 @@
 #include "ubi.h"
 
 /**
- * calc_data_len - calculate how much real data is stored in a buffer.
+ * ubi_calc_data_len - calculate how much real data is stored in a buffer.
  * @ubi: UBI device description object
  * @buf: a buffer with the contents of the physical eraseblock
  * @length: the buffer length
index 8fcc0bdf06358d8f3c210e56ee43e97c9ee39821..2c867d16f89f7de2942f0db6db9b194bd92ee945 100644 (file)
@@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
                for (i = 0; i < -pebs; i++) {
                        err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
                        if (err)
-                               goto out_acc;
+                               goto out_free;
                }
                spin_lock(&ubi->volumes_lock);
                ubi->rsvd_pebs += pebs;
@@ -512,8 +512,10 @@ out_acc:
                ubi->avail_pebs += pebs;
                spin_unlock(&ubi->volumes_lock);
        }
+       return err;
+
 out_free:
-       kfree(new_eba_tbl);
+       ubi_eba_destroy_table(new_eba_tbl);
        return err;
 }
 
@@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
        if (err) {
                ubi_err(ubi, "cannot add character device for volume %d, error %d",
                        vol_id, err);
+               vol_release(&vol->dev);
                return err;
        }
 
@@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
        vol->dev.groups = volume_dev_groups;
        dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
        err = device_register(&vol->dev);
-       if (err)
-               goto out_cdev;
+       if (err) {
+               cdev_del(&vol->cdev);
+               put_device(&vol->dev);
+               return err;
+       }
 
        self_check_volumes(ubi);
        return err;
-
-out_cdev:
-       cdev_del(&vol->cdev);
-       return err;
 }
 
 /**
index 68eb0f21b3fe2150d5a89542daf31679eed7c331..40f39e5d6dfcc068519598452bd8787c7b143c39 100644 (file)
@@ -165,7 +165,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
 }
 
 /**
- * wl_tree_destroy - destroy a wear-leveling entry.
+ * wl_entry_destroy - destroy a wear-leveling entry.
  * @ubi: UBI device description object
  * @e: the wear-leveling entry to add
  *
@@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
-               if (e2)
+               if (e2) {
+                       spin_lock(&ubi->wl_lock);
                        wl_entry_destroy(ubi, e2);
+                       spin_unlock(&ubi->wl_lock);
+               }
                goto out_ro;
        }
 
@@ -973,11 +976,11 @@ out_error:
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
+       wl_entry_destroy(ubi, e1);
+       wl_entry_destroy(ubi, e2);
        spin_unlock(&ubi->wl_lock);
 
        ubi_free_vid_buf(vidb);
-       wl_entry_destroy(ubi, e1);
-       wl_entry_destroy(ubi, e2);
 
 out_ro:
        ubi_ro_mode(ubi);
@@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
                /* Re-schedule the LEB for erasure */
                err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
                if (err1) {
+                       spin_lock(&ubi->wl_lock);
                        wl_entry_destroy(ubi, e);
+                       spin_unlock(&ubi->wl_lock);
                        err = err1;
                        goto out_ro;
                }
                return err;
        }
 
+       spin_lock(&ubi->wl_lock);
        wl_entry_destroy(ubi, e);
+       spin_unlock(&ubi->wl_lock);
        if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
@@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
 retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
+       if (!e) {
+               /*
+                * This wl entry has been removed for some errors by other
+                * process (eg. wear leveling worker), corresponding process
+                * (except __erase_worker, which cannot concurrent with
+                * ubi_wl_put_peb) will set ubi ro_mode at the same time,
+                * just ignore this wl entry.
+                */
+               spin_unlock(&ubi->wl_lock);
+               up_read(&ubi->fm_protect);
+               return 0;
+       }
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected to
index 4849a4c9a0e24f1b8b4202630d68e491eac113dc..764f19dec3f0f04fc3aa5b444f6f6659136c03a4 100644 (file)
@@ -364,20 +364,25 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
 
 int __init jffs2_compressors_init(void)
 {
+       int ret = 0;
 /* Registering compressors */
-#ifdef CONFIG_JFFS2_ZLIB
-       jffs2_zlib_init();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
-       jffs2_rtime_init();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
-       jffs2_rubinmips_init();
-       jffs2_dynrubin_init();
-#endif
-#ifdef CONFIG_JFFS2_LZO
-       jffs2_lzo_init();
-#endif
+       ret = jffs2_zlib_init();
+       if (ret)
+               goto exit;
+       ret = jffs2_rtime_init();
+       if (ret)
+               goto exit_zlib;
+       ret = jffs2_rubinmips_init();
+       if (ret)
+               goto exit_rtime;
+       ret = jffs2_dynrubin_init();
+       if (ret)
+               goto exit_runinmips;
+       ret = jffs2_lzo_init();
+       if (ret)
+               goto exit_dynrubin;
+
+
 /* Setting default compression mode */
 #ifdef CONFIG_JFFS2_CMODE_NONE
        jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
@@ -396,23 +401,26 @@ int __init jffs2_compressors_init(void)
 #endif
 #endif
        return 0;
+
+exit_dynrubin:
+       jffs2_dynrubin_exit();
+exit_runinmips:
+       jffs2_rubinmips_exit();
+exit_rtime:
+       jffs2_rtime_exit();
+exit_zlib:
+       jffs2_zlib_exit();
+exit:
+       return ret;
 }
 
 int jffs2_compressors_exit(void)
 {
 /* Unregistering compressors */
-#ifdef CONFIG_JFFS2_LZO
        jffs2_lzo_exit();
-#endif
-#ifdef CONFIG_JFFS2_RUBIN
        jffs2_dynrubin_exit();
        jffs2_rubinmips_exit();
-#endif
-#ifdef CONFIG_JFFS2_RTIME
        jffs2_rtime_exit();
-#endif
-#ifdef CONFIG_JFFS2_ZLIB
        jffs2_zlib_exit();
-#endif
        return 0;
 }
index 5e91d578f4ed858c2ac575f375619000c5c6814c..3716b6b7924c05333cd6f0b63fd7175eaa41b9d2 100644 (file)
@@ -88,18 +88,32 @@ int jffs2_rubinmips_init(void);
 void jffs2_rubinmips_exit(void);
 int jffs2_dynrubin_init(void);
 void jffs2_dynrubin_exit(void);
+#else
+static inline int jffs2_rubinmips_init(void) { return 0; }
+static inline void jffs2_rubinmips_exit(void) {}
+static inline int jffs2_dynrubin_init(void) { return 0; }
+static inline void jffs2_dynrubin_exit(void) {}
 #endif
 #ifdef CONFIG_JFFS2_RTIME
-int jffs2_rtime_init(void);
-void jffs2_rtime_exit(void);
+extern int jffs2_rtime_init(void);
+extern void jffs2_rtime_exit(void);
+#else
+static inline int jffs2_rtime_init(void) { return 0; }
+static inline void jffs2_rtime_exit(void) {}
 #endif
 #ifdef CONFIG_JFFS2_ZLIB
-int jffs2_zlib_init(void);
-void jffs2_zlib_exit(void);
+extern int jffs2_zlib_init(void);
+extern void jffs2_zlib_exit(void);
+#else
+static inline int jffs2_zlib_init(void) { return 0; }
+static inline void jffs2_zlib_exit(void) {}
 #endif
 #ifdef CONFIG_JFFS2_LZO
-int jffs2_lzo_init(void);
-void jffs2_lzo_exit(void);
+extern int jffs2_lzo_init(void);
+extern void jffs2_lzo_exit(void);
+#else
+static inline int jffs2_lzo_init(void) { return 0; }
+static inline void jffs2_lzo_exit(void) {}
 #endif
 
 #endif /* __JFFS2_COMPR_H__ */
index 3cf71befa47546c8c9184e1b675dcee85b570f26..96b0275ce95747f3377dcd6ac0240735048ab70b 100644 (file)
@@ -137,19 +137,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
        struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
        pgoff_t index = pos >> PAGE_SHIFT;
-       uint32_t pageofs = index << PAGE_SHIFT;
        int ret = 0;
 
        jffs2_dbg(1, "%s()\n", __func__);
 
-       if (pageofs > inode->i_size) {
-               /* Make new hole frag from old EOF to new page */
+       if (pos > inode->i_size) {
+               /* Make new hole frag from old EOF to new position */
                struct jffs2_raw_inode ri;
                struct jffs2_full_dnode *fn;
                uint32_t alloc_len;
 
-               jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
-                         (unsigned int)inode->i_size, pageofs);
+               jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
+                         (unsigned int)inode->i_size, (uint32_t)pos);
 
                ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
                                          ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@@ -169,10 +168,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                ri.mode = cpu_to_jemode(inode->i_mode);
                ri.uid = cpu_to_je16(i_uid_read(inode));
                ri.gid = cpu_to_je16(i_gid_read(inode));
-               ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+               ri.isize = cpu_to_je32((uint32_t)pos);
                ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
                ri.offset = cpu_to_je32(inode->i_size);
-               ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+               ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
                ri.csize = cpu_to_je32(0);
                ri.compr = JFFS2_COMPR_ZERO;
                ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
@@ -202,7 +201,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        goto out_err;
                }
                jffs2_complete_reservation(c);
-               inode->i_size = pageofs;
+               inode->i_size = pos;
                mutex_unlock(&f->sem);
        }
 
index 09174898efd008d1b9a2939ee48a3a736931aeff..038516bee1abaac6b8acb0ff8652cc12f474e848 100644 (file)
@@ -403,7 +403,7 @@ int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
        /* We stop if it was running, then restart if it needs to.
           This also catches the case where it was stopped and this
           is just a remount to restart it.
-          Flush the writebuffer, if neccecary, else we loose it */
+          Flush the writebuffer, if necessary, else we loose it */
        if (!sb_rdonly(sb)) {
                jffs2_stop_garbage_collect_thread(c);
                mutex_lock(&c->alloc_sem);
index e8b9b756f0acaa488ed2c8739a58d05a985213c9..d76eb7b39f56419a703fa28472e1868b97a3c81c 100644 (file)
@@ -209,11 +209,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs)
        subtract_lebs += 1;
 
        /*
-        * The GC journal head LEB is not really accessible. And since
-        * different write types go to different heads, we may count only on
-        * one head's space.
+        * Since different write types go to different heads, we should
+        * reserve one LEB for each head.
         */
-       subtract_lebs += c->jhead_cnt - 1;
+       subtract_lebs += c->jhead_cnt;
 
        /* We also reserve one LEB for deletions, which bypass budgeting */
        subtract_lebs += 1;
@@ -400,7 +399,7 @@ static int calc_dd_growth(const struct ubifs_info *c,
        dd_growth = req->dirtied_page ? c->bi.page_budget : 0;
 
        if (req->dirtied_ino)
-               dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1);
+               dd_growth += c->bi.inode_budget * req->dirtied_ino;
        if (req->mod_dent)
                dd_growth += c->bi.dent_budget;
        dd_growth += req->dirtied_ino_d;
index 1e92c1730c16540cf804fa74299a7b1228eda45e..1505539f6fe974ae73990e57773a8c96628136f4 100644 (file)
@@ -1151,7 +1151,6 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
        int err, sz_change, len = strlen(symname);
        struct fscrypt_str disk_link;
        struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1,
-                                       .new_ino_d = ALIGN(len, 8),
                                        .dirtied_ino = 1 };
        struct fscrypt_name nm;
 
@@ -1167,6 +1166,7 @@ static int ubifs_symlink(struct mnt_idmap *idmap, struct inode *dir,
         * Budget request settings: new inode, new direntry and changing parent
         * directory inode.
         */
+       req.new_ino_d = ALIGN(disk_link.len - 1, 8);
        err = ubifs_budget_space(c, &req);
        if (err)
                return err;
@@ -1324,6 +1324,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (unlink) {
                ubifs_assert(c, inode_is_locked(new_inode));
 
+               /* Budget for old inode's data when its nlink > 1. */
+               req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8);
                err = ubifs_purge_xattrs(new_inode);
                if (err)
                        return err;
@@ -1566,6 +1568,15 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
 
        ubifs_assert(c, fst_inode && snd_inode);
 
+       /*
+        * Budget request settings: changing two direntries, changing the two
+        * parent directory inodes.
+        */
+
+       dbg_gen("dent '%pd' ino %lu in dir ino %lu exchange dent '%pd' ino %lu in dir ino %lu",
+               old_dentry, fst_inode->i_ino, old_dir->i_ino,
+               new_dentry, snd_inode->i_ino, new_dir->i_ino);
+
        err = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &fst_nm);
        if (err)
                return err;
@@ -1576,6 +1587,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
                return err;
        }
 
+       err = ubifs_budget_space(c, &req);
+       if (err)
+               goto out;
+
        lock_4_inodes(old_dir, new_dir, NULL, NULL);
 
        time = current_time(old_dir);
@@ -1601,6 +1616,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
        unlock_4_inodes(old_dir, new_dir, NULL, NULL);
        ubifs_release_budget(c, &req);
 
+out:
        fscrypt_free_filename(&fst_nm);
        fscrypt_free_filename(&snd_nm);
        return err;
index 8cb5d76b301ccde611be04b7662acdcbdb1ead82..979ab1d9d0c39ddf56ea9d9f8e1f5c2e4c30a3d3 100644 (file)
@@ -1032,7 +1032,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
                if (page->index >= synced_i_size >> PAGE_SHIFT) {
                        err = inode->i_sb->s_op->write_inode(inode, NULL);
                        if (err)
-                               goto out_unlock;
+                               goto out_redirty;
                        /*
                         * The inode has been written, but the write-buffer has
                         * not been synchronized, so in case of an unclean
@@ -1060,11 +1060,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
        if (i_size > synced_i_size) {
                err = inode->i_sb->s_op->write_inode(inode, NULL);
                if (err)
-                       goto out_unlock;
+                       goto out_redirty;
        }
 
        return do_writepage(page, len);
-
+out_redirty:
+       /*
+        * redirty_page_for_writepage() won't call ubifs_dirty_inode() because
+        * it passes the I_DIRTY_PAGES flag when calling __mark_inode_dirty(),
+        * so there is no need to budget space for the dirty inode.
+        */
+       redirty_page_for_writepage(wbc, page);
 out_unlock:
        unlock_page(page);
        return err;
@@ -1466,14 +1472,23 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
        struct inode *inode = folio->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
 
-       /*
-        * An attempt to release a dirty page without budgeting for it - should
-        * not happen.
-        */
        if (folio_test_writeback(folio))
                return false;
+
+       /*
+        * Page is private but not dirty, weird? There is one condition
+        * that makes this happen: ubifs_writepage skipped the page because
+        * the page index is beyond isize (for example, it was truncated by
+        * another process A), then the page was invalidated by an fadvise64
+        * syscall before being truncated by process A.
+        */
        ubifs_assert(c, folio_test_private(folio));
-       ubifs_assert(c, 0);
+       if (folio_test_checked(folio))
+               release_new_page_budget(c);
+       else
+               release_existing_page_budget(c);
+
+       atomic_long_dec(&c->dirty_pg_cnt);
        folio_detach_private(folio);
        folio_clear_checked(folio);
        return true;
index 1607a3c76681a287e1c9c81f79ab06f19f96efc5..01d8eb1703820c0f55c4c7b9a8ca1a2d3c205dcd 100644 (file)
@@ -488,7 +488,7 @@ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
 }
 
 /**
- * wbuf_timer_callback - write-buffer timer callback function.
+ * wbuf_timer_callback_nolock - write-buffer timer callback function.
  * @timer: timer data (write-buffer descriptor)
  *
  * This function is called when the write-buffer timer expires.
@@ -505,7 +505,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
 }
 
 /**
- * new_wbuf_timer - start new write-buffer timer.
+ * new_wbuf_timer_nolock - start new write-buffer timer.
  * @c: UBIFS file-system description object
  * @wbuf: write-buffer descriptor
  */
@@ -531,7 +531,7 @@ static void new_wbuf_timer_nolock(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
 }
 
 /**
- * cancel_wbuf_timer - cancel write-buffer timer.
+ * cancel_wbuf_timer_nolock - cancel write-buffer timer.
  * @wbuf: write-buffer descriptor
  */
 static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
index d02509920bafadd9897c3c4fece885d3bc1efed9..dc52ac0f4a345f30d43fc69b7642d76014aeadc0 100644 (file)
@@ -1201,9 +1201,13 @@ out_free:
  * ubifs_jnl_rename - rename a directory entry.
  * @c: UBIFS file-system description object
  * @old_dir: parent inode of directory entry to rename
- * @old_dentry: directory entry to rename
+ * @old_inode: directory entry's inode to rename
+ * @old_nm: name of the old directory entry to rename
  * @new_dir: parent inode of directory entry to rename
- * @new_dentry: new directory entry (or directory entry to replace)
+ * @new_inode: new directory entry's inode (or directory entry's inode to
+ *             replace)
+ * @new_nm: new name of the new directory entry
+ * @whiteout: whiteout inode
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
index d0c9a09988bc7ba59d78391fe10cc9e15dbdaa28..32cb147597960ca2aa20aab79a3b14152a304900 100644 (file)
@@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c)
                INIT_LIST_HEAD(&c->jheads[i].buds_list);
                err = ubifs_wbuf_init(c, &c->jheads[i].wbuf);
                if (err)
-                       return err;
+                       goto out_wbuf;
 
                c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
                c->jheads[i].wbuf.jhead = i;
@@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c)
                c->jheads[i].log_hash = ubifs_hash_get_desc(c);
                if (IS_ERR(c->jheads[i].log_hash)) {
                        err = PTR_ERR(c->jheads[i].log_hash);
-                       goto out;
+                       goto out_log_hash;
                }
        }
 
@@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c)
 
        return 0;
 
-out:
-       while (i--)
+out_log_hash:
+       kfree(c->jheads[i].wbuf.buf);
+       kfree(c->jheads[i].wbuf.inodes);
+
+out_wbuf:
+       while (i--) {
+               kfree(c->jheads[i].wbuf.buf);
+               kfree(c->jheads[i].wbuf.inodes);
                kfree(c->jheads[i].log_hash);
+       }
+       kfree(c->jheads);
+       c->jheads = NULL;
 
        return err;
 }
index 06ad8fa1fcfb08b7c69aec565db4a7186b2629a9..1c958148bb877f7ba6d9c562945798b363d6bd55 100644 (file)
@@ -74,13 +74,13 @@ static const struct sysfs_ops ubifs_attr_ops = {
        .show   = ubifs_attr_show,
 };
 
-static struct kobj_type ubifs_sb_ktype = {
+static const struct kobj_type ubifs_sb_ktype = {
        .default_groups = ubifs_groups,
        .sysfs_ops      = &ubifs_attr_ops,
        .release        = ubifs_sb_release,
 };
 
-static struct kobj_type ubifs_ktype = {
+static const struct kobj_type ubifs_ktype = {
        .sysfs_ops      = &ubifs_attr_ops,
 };
 
@@ -144,6 +144,8 @@ int __init ubifs_sysfs_init(void)
        kobject_set_name(&ubifs_kset.kobj, "ubifs");
        ubifs_kset.kobj.parent = fs_kobj;
        ret = kset_register(&ubifs_kset);
+       if (ret)
+               kset_put(&ubifs_kset);
 
        return ret;
 }
index 488f3da7a6c6ceef0e81a6323605249422c0b5af..2469f72eeaabb16867c6485b8acb745c36f969f5 100644 (file)
@@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
        if (zbr->len) {
                err = insert_old_idx(c, zbr->lnum, zbr->offs);
                if (unlikely(err))
-                       return ERR_PTR(err);
+                       /*
+                        * Obsolete znodes will be freed by tnc_destroy_cnext()
+                        * or free_obsolete_znodes(), copied up znodes should
+                        * be added back to tnc and freed by
+                        * ubifs_destroy_tnc_subtree().
+                        */
+                       goto out;
                err = add_idx_dirt(c, zbr->lnum, zbr->len);
        } else
                err = 0;
 
+out:
        zbr->znode = zn;
        zbr->lnum = 0;
        zbr->offs = 0;
@@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
                cnext = cnext->cnext;
                if (ubifs_zn_obsolete(znode))
                        kfree(znode);
+               else if (!ubifs_zn_cow(znode)) {
+                       /*
+                        * Don't forget to update the clean znode count after a
+                        * failed commit, because UBIFS checks this count while
+                        * closing the TNC. A non-obsolete znode could be
+                        * re-dirtied during the commit, so the dirty flag is
+                        * untrustworthy. The 'COW_ZNODE' flag is set for each
+                        * dirty znode before committing, and it is cleared as
+                        * soon as the znode becomes clean, so we can count
+                        * clean znodes according to this flag.
+                        */
+                       atomic_long_inc(&c->clean_zn_cnt);
+                       atomic_long_inc(&ubifs_clean_zn_cnt);
+               }
        } while (cnext && cnext != c->cnext);
 }
 
index 9063b73536f805cffecd8e5e25bdd8fa48d29fb8..4c36044140e7eba9234b87c3a4bc6ab4b73ec64b 100644 (file)
@@ -1623,8 +1623,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c,
        return crypto_memneq(expected, got, c->hmac_desc_len);
 }
 
+#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
 void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
                    const u8 *hash, int lnum, int offs);
+#else
+static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
+                                 const u8 *hash, int lnum, int offs) {};
+#endif
 
 int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
                          const u8 *expected);
index 7d48ea368c5e57c5361da337d01896c20fe5e06e..a529347fd75b2a065f618d45771b27a7bd163f80 100644 (file)
@@ -110,6 +110,7 @@ struct ubi_volume_info {
        int name_len;
        const char *name;
        dev_t cdev;
+       struct device *dev;
 };
 
 /**