// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	WRITE_ONCE(q->rq_timeout, timeout);
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_hw_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

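/*
 * Editorial note (not part of the original source): the "no limit" defaults
 * above (UINT_MAX/USHRT_MAX) exist so that blk_stack_limits() below can only
 * tighten them. For example, stacking a bottom device with max_segments = 128
 * reduces the USHRT_MAX default to 128 via min_not_zero().
 */
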
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	bdi->ra_pages = max3(bdi->ra_pages,
			lim->io_opt * 2 / PAGE_SIZE,
			VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}

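/*
 * Worked example (hypothetical values, assuming 4 KiB pages): with
 * lim->io_opt = 1 MiB, the middle term of the max3() in
 * blk_apply_bdi_limits() is 2 MiB / 4 KiB = 512 pages, so read-ahead is
 * raised to at least 512 pages unless it was already larger.
 */
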
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	/*
	 * The Zone Append size is limited by the maximum I/O size and the zone
	 * size given that it can't span zones.
	 *
	 * If no max_hw_zone_append_sectors limit is provided, the block layer
	 * will emulate it, else we're also bound by the hardware limit.
	 */
	lim->max_zone_append_sectors =
		min_not_zero(lim->max_hw_zone_append_sectors,
			min(lim->chunk_sectors, lim->max_hw_sectors));
	return 0;
}

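/*
 * Worked example (hypothetical values): for a zoned device with
 * chunk_sectors = 524288 (256 MiB zones), max_hw_sectors = 2048 and no
 * hardware zone-append limit, blk_validate_zoned_limits() yields
 * max_zone_append_sectors = min_not_zero(0, min(524288, 2048)) = 2048.
 */
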
static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || (bi->flags & BLK_INTEGRITY_REF_TAG)) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
		return 0;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns the max guaranteed number of bytes which we can fit in a bio.
 *
 * We request that an atomic_write is an ITER_UBUF iov_iter (so a single
 * vector), so we assume that we can fit at least PAGE_SIZE in a segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
					blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
			lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

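/*
 * Worked example (hypothetical values, assuming BIO_MAX_VECS == 256 and
 * 4 KiB pages): with a 512-byte logical block size and plenty of segments,
 * blk_queue_max_guaranteed_bio() returns 2 * 512 + 254 * 4096 = 1041408
 * bytes, so unit_limit above becomes rounddown_pow_of_two(1041408) =
 * 512 KiB (capped further if max_hw_sectors is smaller).
 */
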
static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_min)))
		goto unsupported;

	if (WARN_ON_ONCE(!is_power_of_2(lim->atomic_write_hw_unit_max)))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_min >
			 lim->atomic_write_hw_unit_max))
		goto unsupported;

	if (WARN_ON_ONCE(lim->atomic_write_hw_unit_max >
			 lim->atomic_write_hw_max))
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		if (WARN_ON_ONCE(lim->atomic_write_hw_max >
				 lim->atomic_write_hw_boundary))
			goto unsupported;
		/*
		 * A feature of boundary support is that it disallows merging
		 * of bios which would result in a merged request crossing
		 * either a chunk sector or atomic write HW boundary, even
		 * though chunk sectors may be set just for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than the atomic write HW
		 * boundary. Furthermore, chunk sectors must be a multiple of
		 * the atomic write HW boundary; otherwise boundary support
		 * becomes complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so the following check
		 * could be relaxed in the future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}

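/*
 * Example (hypothetical values): with atomic_write_hw_boundary = 64 KiB,
 * boundary_sectors is 128. chunk_sectors = 256 passes the multiple check
 * above, while chunk_sectors = 192 would disable atomic writes because
 * 192 % 128 != 0.
 */
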
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n",
			lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to the physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * the block size of the disks attached to them), so round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can handle at
	 * least a page worth of data per I/O, and needs the value aligned to
	 * the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
			lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	if (lim->discard_granularity < lim->physical_block_size)
		lim->discard_granularity = lim->physical_block_size;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now. Long-term the two might need
	 * to move out of the stacking limits, since we have immutable bvecs
	 * and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default
		 * that drivers probably should override. Just like the I/O
		 * size we require drivers to at least handle a full page per
		 * segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* Set up the min segment size for building new segments in the fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}
EXPORT_SYMBOL_GPL(blk_validate_limits);

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be no limit, in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. The caller must have frozen the queue or
 * ensure that there are no outstanding I/Os by other means.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	unsigned int memflags;
	int ret;

	memflags = blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);

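/*
 * Illustrative sketch (not part of the original file; hypothetical driver
 * code): the update pattern referenced in the kernel-doc above is to snapshot
 * the limits with queue_limits_start_update() (which takes q->limits_lock,
 * released by the commit), modify the copy, and commit it, here via the
 * freezing variant.
 */
static int example_driver_update_limits(struct request_queue *q)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.io_opt = 1024 * 1024;	/* hypothetical 1 MiB optimal I/O */
	lim.max_hw_sectors = 2048;	/* 1 MiB with 512-byte sectors */

	/* Revalidates the limits and freezes the queue around the commit. */
	return queue_limits_commit_update_frozen(q, &lim);
}
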
/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

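/*
 * Worked example (hypothetical values): a 4 KiB physical block device
 * (granularity = 8 sectors) with alignment_offset = 3584 bytes and a
 * partition starting at sector 63: alignment = (63 % 8) << 9 = 3584, so
 * (4096 + 3584 - 3584) % 4096 = 0 and the partition is reported as aligned.
 */
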
static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/* Check if second and later bottom devices are compliant */
static bool blk_stack_atomic_writes_tail(struct queue_limits *t,
				struct queue_limits *b)
{
	/* We're not going to support different boundary sizes.. yet */
	if (t->atomic_write_hw_boundary != b->atomic_write_hw_boundary)
		return false;

	/* Can't support this */
	if (t->atomic_write_hw_unit_min > b->atomic_write_hw_unit_max)
		return false;

	/* Or this */
	if (t->atomic_write_hw_unit_max < b->atomic_write_hw_unit_min)
		return false;

	t->atomic_write_hw_max = min(t->atomic_write_hw_max,
				b->atomic_write_hw_max);
	t->atomic_write_hw_unit_min = max(t->atomic_write_hw_unit_min,
				b->atomic_write_hw_unit_min);
	t->atomic_write_hw_unit_max = min(t->atomic_write_hw_unit_max,
				b->atomic_write_hw_unit_max);
	return true;
}

/* Check for valid boundary of first bottom device */
static bool blk_stack_atomic_writes_boundary_head(struct queue_limits *t,
				struct queue_limits *b)
{
	/*
	 * Ensure atomic write boundary is aligned with chunk sectors. Stacked
	 * devices store chunk sectors in t->io_min.
	 */
	if (b->atomic_write_hw_boundary > t->io_min &&
	    b->atomic_write_hw_boundary % t->io_min)
		return false;
	if (t->io_min > b->atomic_write_hw_boundary &&
	    t->io_min % b->atomic_write_hw_boundary)
		return false;

	t->atomic_write_hw_boundary = b->atomic_write_hw_boundary;
	return true;
}

/* Check stacking of first bottom device */
static bool blk_stack_atomic_writes_head(struct queue_limits *t,
				struct queue_limits *b)
{
	if (b->atomic_write_hw_boundary &&
	    !blk_stack_atomic_writes_boundary_head(t, b))
		return false;

	if (t->io_min <= SECTOR_SIZE) {
		/* No chunk sectors, so use bottom device values directly */
		t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
		t->atomic_write_hw_unit_min = b->atomic_write_hw_unit_min;
		t->atomic_write_hw_max = b->atomic_write_hw_max;
		return true;
	}

	/*
	 * Find values for limits which work for chunk size.
	 * b->atomic_write_hw_unit_{min, max} may not be aligned with chunk
	 * size (t->io_min), as chunk size is not restricted to a power-of-2.
	 * So we need to find highest power-of-2 which works for the chunk
	 * size.
	 * As an example scenario, we could have b->unit_max = 16K and
	 * t->io_min = 24K. For this case, reduce t->unit_max to a value
	 * aligned with both limits, i.e. 8K in this example.
	 */
	t->atomic_write_hw_unit_max = b->atomic_write_hw_unit_max;
	while (t->io_min % t->atomic_write_hw_unit_max)
		t->atomic_write_hw_unit_max /= 2;

	t->atomic_write_hw_unit_min = min(b->atomic_write_hw_unit_min,
					t->atomic_write_hw_unit_max);
	t->atomic_write_hw_max = min(b->atomic_write_hw_max, t->io_min);

	return true;
}

static void blk_stack_atomic_writes_limits(struct queue_limits *t,
				struct queue_limits *b, sector_t start)
{
	if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
		goto unsupported;

	if (!b->atomic_write_hw_unit_min)
		goto unsupported;

	if (!blk_atomic_write_start_sect_aligned(start, b))
		goto unsupported;

	/*
	 * If atomic_write_hw_max is set, we have already stacked 1x bottom
	 * device, so check for compliance.
	 */
	if (t->atomic_write_hw_max) {
		if (!blk_stack_atomic_writes_tail(t, b))
			goto unsupported;
		return;
	}

	if (!blk_stack_atomic_writes_head(t, b))
		goto unsupported;
	return;

unsupported:
	t->atomic_write_hw_max = 0;
	t->atomic_write_hw_unit_max = 0;
	t->atomic_write_hw_unit_min = 0;
	t->atomic_write_hw_boundary = 0;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * Some features need to be supported both by the stacking driver and
	 * all underlying devices.  The stacking driver sets these flags before
	 * stacking the limits, and this will clear the flags if any of the
	 * underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					b->max_write_zeroes_sectors);
	t->max_hw_zone_append_sectors = min(t->max_hw_zone_append_sectors,
					b->max_hw_zone_append_sectors);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/*
	 * The bottom device has a different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	blk_stack_atomic_writes_limits(t, b, start);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

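/*
 * Example (hypothetical values): stacking a 512-byte logical block bottom
 * device onto limits that already contain a 4096-byte one keeps
 * t->logical_block_size at 4096 (the max), while io_opt values of 64 KiB
 * and 96 KiB combine to lcm(64K, 96K) = 192 KiB.
 */
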
/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, bdev_limits(bdev),
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

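/*
 * Illustrative sketch (not part of the original file; hypothetical stacking
 * driver code): fold every component device into freshly reset stacking
 * limits before applying them to the stacked queue.
 */
static void example_stack_component_limits(struct queue_limits *lim,
		struct block_device **bdevs, int nr_bdevs, const char *name)
{
	int i;

	blk_set_stacking_limits(lim);
	for (i = 0; i < nr_bdevs; i++)
		queue_limits_stack_bdev(lim, bdevs[i], 0, name);
}
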
/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the target @t.
 * Stacking is possible if either:
 *
 * a) @t does not have any integrity information stacked into it yet, or
 * b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true.  Else return %false and clear
 * the integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (ti->flags & BLK_INTEGRITY_STACKED) {
		if (ti->tuple_size != bi->tuple_size)
			goto incompatible;
		if (ti->interval_exp != bi->interval_exp)
			goto incompatible;
		if (ti->tag_size != bi->tag_size)
			goto incompatible;
		if (ti->csum_type != bi->csum_type)
			goto incompatible;
		if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
		    (bi->flags & BLK_INTEGRITY_REF_TAG))
			goto incompatible;
	} else {
		ti->flags = BLK_INTEGRITY_STACKED;
		ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
			     (bi->flags & BLK_INTEGRITY_REF_TAG);
		ti->csum_type = bi->csum_type;
		ti->tuple_size = bi->tuple_size;
		ti->pi_offset = bi->pi_offset;
		ti->interval_exp = bi->interval_exp;
		ti->tag_size = bi->tag_size;
	}
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);