/*
 * Functions related to generic block device helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * A batch of bios sharing one on-stack completion: "done" counts in-flight
 * bios plus one reference held by the submitter, "flags" carries the
 * BIO_UPTODATE bit, and "wait" is signalled when the last bio completes.
 */
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);
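	/*
	 * Note: sector_div() divides its first argument in place and returns
	 * the remainder, so "alignment" now holds the device's discard
	 * alignment as a sector offset within a single granularity-sized
	 * chunk.
	 */
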
	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
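	/*
	 * Worked example (illustrative values): if max_discard_sectors is
	 * reported as 65535 and granularity is 8, the rounding above yields
	 * 65535 / 8 * 8 = 65528, so every full-sized request ends on a
	 * granularity boundary. If the rounding collapses to zero
	 * (granularity larger than the maximum), the check below bails out.
	 */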
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
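
		/*
		 * Worked example (illustrative values): granularity = 8,
		 * alignment = 0, sector = 2, req_sects = 16, more sectors to
		 * follow. Then end_sect = 18 and 18 % 8 = 2 != alignment, so
		 * end_sect is rounded down to 16 and req_sects trimmed to 14;
		 * the next bio then starts at sector 16, properly aligned.
		 */
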
		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
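
/*
 * Usage sketch (not part of the original file): a minimal, hypothetical
 * caller that discards a whole device. example_discard_dev() and the
 * blkdev_get_by_path()/blkdev_put() pairing around the call are assumptions
 * for illustration; only blkdev_issue_discard() itself is defined above.
 */
#if 0
static int example_discard_dev(const char *path)
{
	struct block_device *bdev;
	sector_t nr_sects;
	int ret;

	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* Device size in bytes, converted to 512-byte sectors. */
	nr_sects = i_size_read(bdev->bd_inode) >> 9;
	ret = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
	return ret;
}
#endif
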
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
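
/*
 * Usage sketch (hypothetical): replicate one page-sized payload across a
 * range, the way blkdev_issue_zeroout() below uses WRITE SAME with
 * ZERO_PAGE(0), the kernel's shared page of zeroes.
 * example_write_same_zero() is an assumed name for illustration only.
 */
#if 0
static int example_write_same_zero(struct block_device *bdev,
				   sector_t sector, sector_t nr_sects)
{
	/* The device replays the zero page over every logical block. */
	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}
#endif
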
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
			/* bio_add_page() returns the number of bytes added. */
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range, preferring a WRITE SAME of the zero page
 *  when the device supports it and falling back to zero-filled write bios
 *  otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
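
/*
 * Usage sketch (hypothetical): zero the first MiB of a device, i.e. 2048
 * sectors of 512 bytes. blkdev_issue_zeroout() prefers WRITE SAME when the
 * device advertises it and otherwise falls back to plain zero-filled
 * writes. example_zero_first_mb() is an assumed name for illustration.
 */
#if 0
static int example_zero_first_mb(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL);
}
#endif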