// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
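
/*
 * Example (illustrative sketch, not part of this file): a driver that wants
 * requests to expire after 30 seconds would call this from its setup path,
 * with "q" being that driver's request_queue:
 *
 *	blk_queue_rq_timeout(q, 30 * HZ);
 */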
/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
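
/*
 * Example (illustrative sketch, not part of this file): a stacking driver
 * such as MD or DM typically resets its limits with this helper and then
 * folds in the limits of every component device; "table" and "dev" below
 * are hypothetical names for that driver's own data structures:
 *
 *	struct queue_limits lim;
 *
 *	blk_set_stacking_limits(&lim);
 *	list_for_each_entry(dev, &table->devices, list)
 *		queue_limits_stack_bdev(&lim, dev->bdev, dev->start_sect,
 *					disk->disk_name);
 */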
static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 */
	bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
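
/*
 * Worked example (illustrative, assuming 4 KiB pages): a device reporting
 * io_opt = 512 KiB gets ra_pages = 512 KiB * 2 / 4 KiB = 256 pages (1 MiB of
 * read-ahead), while a device reporting no optimal I/O size falls back to
 * the VM_READAHEAD_PAGES default.
 */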
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!lim->zoned) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account.  The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
				lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			lim->logical_block_size >> SECTOR_SHIFT);

	/*
	 * Random default for the maximum number of segments.  Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override.  Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
			return -EINVAL;
	}

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits.  Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->misaligned = false;
	}

	return blk_validate_zoned_limits(lim);
}
/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit, in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}
/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
	__releases(q->limits_lock)
{
	int error = blk_validate_limits(lim);

	if (!error) {
		q->limits = *lim;
		if (q->disk)
			blk_apply_bdi_limits(q->disk->bdi, lim);
	}
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
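
/*
 * Example (illustrative sketch, not part of this file): the usual calling
 * pattern for an atomic limits update is to take a snapshot with
 * queue_limits_start_update(), modify it, and commit it back; "q" and the
 * 4 KiB block size below are hypothetical:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int err;
 *
 *	lim.logical_block_size = 4096;
 *	err = queue_limits_commit_update(q, &lim);
 */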
/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
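
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * device cannot merge I/O across 128 MiB boundaries could set the chunk
 * size accordingly; the value is hypothetical and given in 512-byte sectors:
 *
 *	blk_queue_chunk_sectors(q, 128 * 1024 * 2);
 */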
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	struct queue_limits *lim = &q->limits;

	lim->max_hw_discard_sectors = max_discard_sectors;
	lim->max_discard_sectors =
		min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q:  the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors)
{
	q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 *
 * Sets the maximum number of sectors allowed for zone append commands.
 * Specifying 0 for @max_zone_append_sectors indicates that the queue does
 * not natively support zone append operations and that the block layer must
 * emulate these operations using regular writes.
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors)
{
	unsigned int max_sectors = 0;

	if (WARN_ON(!blk_queue_is_zoned(q)))
		return;

	if (max_zone_append_sectors) {
		max_sectors = min(q->limits.max_hw_sectors,
				  max_zone_append_sectors);
		max_sectors = min(q->limits.chunk_sectors, max_sectors);

		/*
		 * Signal eventual driver bugs resulting in the max_zone_append
		 * sectors limit being 0 due to the chunk_sectors limit (zone
		 * size) not set or the max_hw_sectors limit not set.
		 */
		WARN_ON_ONCE(!max_sectors);
	}

	q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
	struct queue_limits *limits = &q->limits;

	limits->logical_block_size = size;

	if (limits->discard_granularity < limits->logical_block_size)
		limits->discard_granularity = limits->logical_block_size;

	if (limits->physical_block_size < size)
		limits->physical_block_size = size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

	limits->max_hw_sectors =
		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
	limits->max_sectors =
		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
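
/*
 * Example (illustrative sketch, not part of this file): a driver for a 4K
 * native device would typically advertise matching logical and physical
 * block sizes ("q" is that driver's request_queue):
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 */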
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.discard_granularity < q->limits.physical_block_size)
		q->limits.discard_granularity = q->limits.physical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing writes in the
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size)
{
	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return;

	q->limits.zone_write_granularity = size;

	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
		q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
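
/*
 * Example (illustrative sketch, not part of this file): a 512e drive with
 * 4 KiB physical blocks that reports an alignment offset of 3584 bytes
 * (7 logical sectors) might be registered as:
 *
 *	blk_queue_alignment_offset(q, 7 * SECTOR_SIZE);
 */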
void disk_update_readahead(struct gendisk *disk)
{
	blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
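
/*
 * Example (illustrative sketch, not part of this file): a RAID-5 driver with
 * a hypothetical 64 KiB chunk size and 4 data disks might advertise the
 * chunk size as the minimum I/O and the full stripe width as the optimal I/O:
 *
 *	blk_limits_io_min(&lim, 64 * 1024);
 *	blk_limits_io_opt(&lim, 64 * 1024 * 4);
 */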
static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}
static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
611 unsigned int top, bottom, alignment, ret = 0;
613 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
614 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
615 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
616 t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
617 b->max_write_zeroes_sectors);
618 t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
619 queue_limits_max_zone_append_sectors(b));
620 t->bounce = max(t->bounce, b->bounce);
622 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
623 b->seg_boundary_mask);
624 t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
625 b->virt_boundary_mask);
627 t->max_segments = min_not_zero(t->max_segments, b->max_segments);
628 t->max_discard_segments = min_not_zero(t->max_discard_segments,
629 b->max_discard_segments);
630 t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
631 b->max_integrity_segments);
633 t->max_segment_size = min_not_zero(t->max_segment_size,
634 b->max_segment_size);
636 t->misaligned |= b->misaligned;
638 alignment = queue_limit_alignment_offset(b, start);
640 /* Bottom device has different alignment. Check that it is
641 * compatible with the current top alignment.
643 if (t->alignment_offset != alignment) {
645 top = max(t->physical_block_size, t->io_min)
646 + t->alignment_offset;
647 bottom = max(b->physical_block_size, b->io_min) + alignment;
649 /* Verify that top and bottom intervals line up */
650 if (max(top, bottom) % min(top, bottom)) {
656 t->logical_block_size = max(t->logical_block_size,
657 b->logical_block_size);
659 t->physical_block_size = max(t->physical_block_size,
660 b->physical_block_size);
662 t->io_min = max(t->io_min, b->io_min);
663 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
664 t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
666 /* Set non-power-of-2 compatible chunk_sectors boundary */
667 if (b->chunk_sectors)
668 t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
670 /* Physical block size a multiple of the logical block size? */
671 if (t->physical_block_size & (t->logical_block_size - 1)) {
672 t->physical_block_size = t->logical_block_size;
677 /* Minimum I/O a multiple of the physical block size? */
678 if (t->io_min & (t->physical_block_size - 1)) {
679 t->io_min = t->physical_block_size;
684 /* Optimal I/O a multiple of the physical block size? */
685 if (t->io_opt & (t->physical_block_size - 1)) {
691 /* chunk_sectors a multiple of the physical block size? */
692 if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
693 t->chunk_sectors = 0;
698 t->raid_partial_stripes_expensive =
699 max(t->raid_partial_stripes_expensive,
700 b->raid_partial_stripes_expensive);
702 /* Find lowest common alignment_offset */
703 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
704 % max(t->physical_block_size, t->io_min);
706 /* Verify that new alignment_offset is on a logical block boundary */
707 if (t->alignment_offset & (t->logical_block_size - 1)) {
712 t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
713 t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
714 t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
716 /* Discard alignment and granularity */
717 if (b->discard_granularity) {
718 alignment = queue_limit_discard_alignment(b, start);
720 if (t->discard_granularity != 0 &&
721 t->discard_alignment != alignment) {
722 top = t->discard_granularity + t->discard_alignment;
723 bottom = b->discard_granularity + alignment;
725 /* Verify that top and bottom intervals line up */
726 if ((max(top, bottom) % min(top, bottom)) != 0)
727 t->discard_misaligned = 1;
730 t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
731 b->max_discard_sectors);
732 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
733 b->max_hw_discard_sectors);
734 t->discard_granularity = max(t->discard_granularity,
735 b->discard_granularity);
736 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
737 t->discard_granularity;
739 t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
740 b->max_secure_erase_sectors);
741 t->zone_write_granularity = max(t->zone_write_granularity,
742 b->zone_write_granularity);
743 t->zoned = max(t->zoned, b->zoned);
745 t->zone_write_granularity = 0;
746 t->max_zone_append_sectors = 0;
750 EXPORT_SYMBOL(blk_stack_limits);
/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
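
/*
 * Example (illustrative sketch, not part of this file): drivers that track a
 * per-device queue depth, e.g. SCSI when a LUN's depth changes, would pass
 * the new value along so rq-qos throttling can adapt:
 *
 *	blk_set_queue_depth(sdev->request_queue, depth);
 */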
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	if (wc) {
		blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	}
	if (fua)
		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
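
/*
 * Example (illustrative sketch, not part of this file): a driver for a
 * device with a volatile write-back cache and FUA support would register
 * both capabilities:
 *
 *	blk_queue_write_cache(q, true, true);
 */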
/**
 * disk_set_zoned - indicate a zoned device
 * @disk:	gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

	/*
	 * Set the zone write granularity to the device logical block
	 * size by default. The driver can change this value if needed.
	 */
	q->limits.zoned = true;
	blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
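
/*
 * Example (illustrative sketch, not part of this file): a zoned driver would
 * typically mark the disk as zoned and then describe the zone geometry; the
 * zone_sectors and append_sectors values below are hypothetical, and the
 * chunk size (zone size) must be set before the zone append limit:
 *
 *	disk_set_zoned(disk);
 *	blk_queue_chunk_sectors(q, zone_sectors);
 *	blk_queue_max_zone_append_sectors(q, append_sectors);
 */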
int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);
unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);