diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eace45a18d45611e70e1b4a988d674535925a30a..3b694ba3a106e68d4c0d5e64cd9136cf7abce237 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end)
                sectors = io->sectors;
 
        if (!end)
-               bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio),
-                                  start_time);
+               bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time);
        else
-               bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time);
+               bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors,
+                                start_time);
 
        if (static_branch_unlikely(&stats_enabled) &&
            unlikely(dm_stats_used(&md->stats))) {
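
This hunk follows a block core API change: the sector count argument moved
from bdev_start_io_acct() to bdev_end_io_acct(), so bio-based drivers now
account sectors at completion time (for dm this matters because a split bio
should only account the sectors it actually carried). A minimal sketch of
the new calling convention, with a hypothetical driver function:

	/* Hypothetical bio-based driver path, for illustration only. */
	static void example_account_bio(struct bio *bio)
	{
		unsigned long start_time;

		/* Start accounting: op and start time, no sector count. */
		start_time = bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
						jiffies);

		/* ... issue and complete the I/O ... */

		/* The sector count is reported once the I/O is done. */
		bdev_end_io_acct(bio->bi_bdev, bio_op(bio), bio_sectors(bio),
				 start_time);
	}
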
@@ -1072,6 +1072,15 @@ static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
        __dm_io_dec_pending(io);
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference count
+ * on 'md'. The holders check is omitted here to avoid an atomic_read().
+ */
+static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+       return &md->queue->limits;
+}
+
 void disable_discard(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
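
dm_get_queue_limits() becomes a dm.c-private static inline; the exported
variant removed at the bottom of this diff also asserted md->holders with a
BUG_ON, which is the verification the comment above says is being skipped.
A usage sketch with a hypothetical helper name, for illustration:

	/*
	 * Hypothetical in-core caller: safe without the holders check
	 * because dm core runs these paths with a reference on 'md' held.
	 */
	static bool example_discard_supported(struct mapped_device *md)
	{
		struct queue_limits *limits = dm_get_queue_limits(md);

		return limits->max_discard_sectors > 0;
	}
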
@@ -1162,7 +1171,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
        return ti->len - target_offset;
 }
 
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+                            unsigned int max_granularity)
 {
        sector_t target_offset = dm_target_offset(ti, sector);
        sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1173,11 +1183,16 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
         *   explains why stacked chunk_sectors based splitting via
         *   bio_split_to_limits() isn't possible here.
         */
-       if (!ti->max_io_len)
+       if (!max_granularity)
                return len;
        return min_t(sector_t, len,
                min(queue_max_sectors(ti->table->md->queue),
-                   blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+                   blk_chunk_sectors_left(target_offset, max_granularity)));
+}
+
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+       return __max_io_len(ti, sector, ti->max_io_len);
 }
 
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
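
__max_io_len() can now cap an I/O at the next max_granularity boundary, in
addition to the target boundary and the queue's max_sectors; max_io_len()
keeps its old behavior by passing ti->max_io_len through. The boundary math
is blk_chunk_sectors_left(); a worked example assuming its power-of-two
fast path:

	/*
	 * Assumed power-of-two behavior of blk_chunk_sectors_left():
	 * sectors remaining before the next chunk boundary.
	 */
	static inline unsigned int example_chunk_left(sector_t offset,
						unsigned int chunk_sectors)
	{
		return chunk_sectors - (offset & (chunk_sectors - 1));
	}

	/*
	 * example_chunk_left(700, 512) == 324: an I/O starting at sector
	 * 700 may span at most 324 sectors before crossing sector 1024.
	 */
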
@@ -1467,7 +1482,8 @@ static void setup_split_accounting(struct clone_info *ci, unsigned int len)
 }
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
-                               struct dm_target *ti, unsigned int num_bios)
+                               struct dm_target *ti, unsigned int num_bios,
+                               unsigned int *len)
 {
        struct bio *bio;
        int try;
@@ -1478,7 +1494,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                if (try)
                        mutex_lock(&ci->io->md->table_devices_lock);
                for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
-                       bio = alloc_tio(ci, ti, bio_nr, NULL,
+                       bio = alloc_tio(ci, ti, bio_nr, len,
                                        try ? GFP_NOIO : GFP_NOWAIT);
                        if (!bio)
                                break;
@@ -1513,8 +1529,10 @@ static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                ret = 1;
                break;
        default:
+               if (len)
+                       setup_split_accounting(ci, *len);
                /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
-               alloc_multiple_bios(&blist, ci, ti, num_bios);
+               alloc_multiple_bios(&blist, ci, ti, num_bios, len);
                while ((clone = bio_list_pop(&blist))) {
                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                        __map_bio(clone);
@@ -1562,12 +1580,13 @@ static void __send_empty_flush(struct clone_info *ci)
 }
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-                                       unsigned int num_bios)
+                                       unsigned int num_bios,
+                                       unsigned int max_granularity)
 {
        unsigned int len, bios;
 
        len = min_t(sector_t, ci->sector_count,
-                   max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+                   __max_io_len(ti, ci->sector, max_granularity));
 
        atomic_add(num_bios, &ci->io->io_count);
        bios = __send_duplicate_bios(ci, ti, num_bios, &len);
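
With a non-zero max_granularity, a large discard (or secure erase, or write
zeroes) is no longer sent as one target-extent-sized bio: each pass covers
at most the distance to the next granularity boundary, and dm's existing
splitting machinery resubmits the remainder. Conceptually, with invented
numbers:

	/*
	 * Illustrative splitting of a 2048-sector discard starting at
	 * sector 700 with max_granularity == 512 (invented numbers):
	 *
	 *   pass 1: len = min(2048, 324) -> sectors  700..1023
	 *   pass 2: len = min(1724, 512) -> sectors 1024..1535
	 *   ... continuing in 512-sector pieces until nothing remains.
	 */
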
@@ -1603,16 +1622,24 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
                                          struct dm_target *ti)
 {
        unsigned int num_bios = 0;
+       unsigned int max_granularity = 0;
+       struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
 
        switch (bio_op(ci->bio)) {
        case REQ_OP_DISCARD:
                num_bios = ti->num_discard_bios;
+               if (ti->max_discard_granularity)
+                       max_granularity = limits->max_discard_sectors;
                break;
        case REQ_OP_SECURE_ERASE:
                num_bios = ti->num_secure_erase_bios;
+               if (ti->max_secure_erase_granularity)
+                       max_granularity = limits->max_secure_erase_sectors;
                break;
        case REQ_OP_WRITE_ZEROES:
                num_bios = ti->num_write_zeroes_bios;
+               if (ti->max_write_zeroes_granularity)
+                       max_granularity = limits->max_write_zeroes_sectors;
                break;
        default:
                break;
@@ -1627,7 +1654,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
        if (unlikely(!num_bios))
                return BLK_STS_NOTSUPP;
 
-       __send_changing_extent_only(ci, ti, num_bios);
+       __send_changing_extent_only(ci, ti, num_bios, max_granularity);
        return BLK_STS_OK;
 }
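
A device limit is only applied when the target opted in through the
matching max_*_granularity flag; otherwise max_granularity stays 0 and
__max_io_len() degrades to the old target-boundary-only behavior. A sketch
of a target constructor opting in (the flag names appear in this diff; the
rest is illustrative):

	static int example_ctr(struct dm_target *ti, unsigned int argc,
			       char **argv)
	{
		ti->num_discard_bios = 1;
		/* Ask dm core to split discards at max_discard_sectors. */
		ti->max_discard_granularity = true;
		return 0;
	}
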
 
@@ -2097,7 +2124,9 @@ static struct mapped_device *alloc_dev(int minor)
        if (!md->pending_io)
                goto bad;
 
-       dm_stats_init(&md->stats);
+       r = dm_stats_init(&md->stats);
+       if (r < 0)
+               goto bad;
 
        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
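
dm_stats_init() can now fail, so alloc_dev() checks the return value and
unwinds through its existing error path. The plausible failure mode is
alloc_percpu() returning NULL; a hedged sketch of such an init (the real
function lives in drivers/md/dm-stats.c):

	int example_stats_init(struct dm_stats *stats)
	{
		mutex_init(&stats->mutex);
		INIT_LIST_HEAD(&stats->list);

		/* The per-CPU state is the only allocation that can fail. */
		stats->last = alloc_percpu(struct dm_stats_last_position);
		if (!stats->last)
			return -ENOMEM;

		return 0;
	}
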
@@ -2291,17 +2320,6 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
        return md->immutable_target_type;
 }
 
-/*
- * The queue_limits are only valid as long as you have a reference
- * count on 'md'.
- */
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
-{
-       BUG_ON(!atomic_read(&md->holders));
-       return &md->queue->limits;
-}
-EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-
 /*
  * Setup the DM device's queue based on md's type
  */