diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index dfde0088147a1e5bd7be946d330f30b2b4684dcf..3b694ba3a106e68d4c0d5e64cd9136cf7abce237 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1072,6 +1072,15 @@ static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
        __dm_io_dec_pending(io);
 }
 
+/*
+ * The queue_limits are only valid as long as you have a reference
+ * count on 'md'. Verification is _not_ imposed here to avoid atomic_read().
+ */
+static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
+{
+       return &md->queue->limits;
+}
+
 void disable_discard(struct mapped_device *md)
 {
        struct queue_limits *limits = dm_get_queue_limits(md);
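Note: with the holder check gone, the helper is a plain dereference for dm.c-internal callers that already hold a live 'md'. A minimal sketch of that usage pattern (dm_discard_enabled is a hypothetical name, not part of the patch):

/* Hypothetical dm.c-internal caller: 'md' is known to be referenced,
 * so the limits can be read without checking md->holders.
 */
static bool dm_discard_enabled(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	return limits->max_discard_sectors != 0;
}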
@@ -1162,7 +1171,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
        return ti->len - target_offset;
 }
 
-static sector_t max_io_len(struct dm_target *ti, sector_t sector)
+static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
+                            unsigned int max_granularity)
 {
        sector_t target_offset = dm_target_offset(ti, sector);
        sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1173,11 +1183,16 @@ static sector_t max_io_len(struct dm_target *ti, sector_t sector)
         *   explains why stacked chunk_sectors based splitting via
         *   bio_split_to_limits() isn't possible here.
         */
-       if (!ti->max_io_len)
+       if (!max_granularity)
                return len;
        return min_t(sector_t, len,
                min(queue_max_sectors(ti->table->md->queue),
-                   blk_chunk_sectors_left(target_offset, ti->max_io_len)));
+                   blk_chunk_sectors_left(target_offset, max_granularity)));
+}
+
+static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
+{
+       return __max_io_len(ti, sector, ti->max_io_len);
 }
 
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
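Worked example of the clamping done by __max_io_len(): for a power-of-2 granularity, blk_chunk_sectors_left() returns the room left before the next granularity boundary. The numbers below (offset 70, granularity 64) are assumed for illustration only, and chunk_sectors_left() simply mirrors the power-of-2 path of blk_chunk_sectors_left() as a standalone, compilable sketch:

#include <stdio.h>

/* Same expression as blk_chunk_sectors_left()'s power-of-2 fast path. */
static unsigned int chunk_sectors_left(unsigned long long offset,
				       unsigned int chunk_sectors)
{
	return chunk_sectors - (offset & (chunk_sectors - 1));
}

int main(void)
{
	/* A discard starting 70 sectors into the target, 64-sector granularity:
	 * 64 - (70 & 63) = 58, so the bio is cut at the next 64-sector boundary.
	 */
	printf("%u\n", chunk_sectors_left(70, 64));	/* prints 58 */
	return 0;
}

__max_io_len() then returns min(len, min(queue_max_sectors(q), 58)) for that bio.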
@@ -1565,12 +1580,13 @@ static void __send_empty_flush(struct clone_info *ci)
 }
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-                                       unsigned int num_bios)
+                                       unsigned int num_bios,
+                                       unsigned int max_granularity)
 {
        unsigned int len, bios;
 
        len = min_t(sector_t, ci->sector_count,
-                   max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
+                   __max_io_len(ti, ci->sector, max_granularity));
 
        atomic_add(num_bios, &ci->io->io_count);
        bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1606,16 +1622,24 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
                                          struct dm_target *ti)
 {
        unsigned int num_bios = 0;
+       unsigned int max_granularity = 0;
+       struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
 
        switch (bio_op(ci->bio)) {
        case REQ_OP_DISCARD:
                num_bios = ti->num_discard_bios;
+               if (ti->max_discard_granularity)
+                       max_granularity = limits->max_discard_sectors;
                break;
        case REQ_OP_SECURE_ERASE:
                num_bios = ti->num_secure_erase_bios;
+               if (ti->max_secure_erase_granularity)
+                       max_granularity = limits->max_secure_erase_sectors;
                break;
        case REQ_OP_WRITE_ZEROES:
                num_bios = ti->num_write_zeroes_bios;
+               if (ti->max_write_zeroes_granularity)
+                       max_granularity = limits->max_write_zeroes_sectors;
                break;
        default:
                break;
@@ -1630,7 +1654,7 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
        if (unlikely(!num_bios))
                return BLK_STS_NOTSUPP;
 
-       __send_changing_extent_only(ci, ti, num_bios);
+       __send_changing_extent_only(ci, ti, num_bios, max_granularity);
        return BLK_STS_OK;
 }
 
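For context, a target opts in to this per-limit splitting by setting the corresponding flag from struct dm_target in its constructor. A hypothetical sketch (the target name and ctr body are assumptions; the flag and bio-count fields are the ones referenced in the hunk above):

/* Hypothetical target ctr: request that DM split discards sent to this
 * target on the device's max_discard_sectors boundary.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_discard_bios = 1;
	ti->max_discard_granularity = true;
	return 0;
}

Targets that leave the flag clear keep the old behaviour: max_granularity stays 0 and __max_io_len() only honours the target boundary.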
@@ -2296,17 +2320,6 @@ struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
        return md->immutable_target_type;
 }
 
-/*
- * The queue_limits are only valid as long as you have a reference
- * count on 'md'.
- */
-struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
-{
-       BUG_ON(!atomic_read(&md->holders));
-       return &md->queue->limits;
-}
-EXPORT_SYMBOL_GPL(dm_get_queue_limits);
-
 /*
  * Setup the DM device's queue based on md's type
  */