// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb_is_dsync(iocb))
                opf |= REQ_FUA;
        return opf;
}

static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
                        struct iov_iter *iter)
{
        return pos & (bdev_logical_block_size(bdev) - 1) ||
                !bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

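/*
 * Synchronous direct I/O fast path: a single bio built on the stack, using at
 * most DIO_INLINE_BIO_VECS inline vectors (a vector array is allocated for
 * larger requests) and completed with submit_bio_wait().
 */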
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        if (iov_iter_rw(iter) == READ) {
                bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
                if (user_backed_iter(iter))
                        should_dirty = true;
        } else {
                bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
        }
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == WRITE)
                task_io_account_write(ret);

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;

        submit_bio_wait(&bio);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

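/*
 * Per-request state (struct blkdev_dio) and its flags for the multi-bio and
 * async paths.  The dio is embedded into the first bio, which is allocated
 * from blkdev_dio_pool.
 */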
enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

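/*
 * Completion handler for the multi-bio path: when the last bio referencing
 * the dio completes, either wake the synchronous waiter or complete the iocb.
 */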
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

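/*
 * Multi-bio path for requests that need more than BIO_MAX_VECS segments: the
 * iterator is split into several bios that share the refcounted dio and are
 * submitted under a single plug.
 */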
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure, which is
         * embedded into the first bio, stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && user_backed_iter(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        /*
                         * This is nonblocking I/O, and we need to allocate
                         * another bio if we have data left to map. As we
                         * cannot guarantee that one of the sub-bios will not
                         * fail to be issued for NOWAIT and as error results
                         * are coalesced across all of them, be safe and ask
                         * for a retry of this from blocking context.
                         */
                        if (unlikely(iov_iter_count(iter))) {
                                bio_release_pages(bio, false);
                                bio_clear_flag(bio, BIO_REFFED);
                                bio_put(bio);
                                blk_finish_plug(&plug);
                                return -EAGAIN;
                        }
                        bio->bi_opf |= REQ_NOWAIT;
                }

                if (is_read) {
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

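/* Completion handler for the single-bio async path below. */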
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        WRITE_ONCE(iocb->private, NULL);

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

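/*
 * Async fast path for requests that fit into a single bio, with optional
 * polled completion (REQ_POLLED) when IOCB_HIPRI is set.
 */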
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        bool is_read = iov_iter_rw(iter) == READ;
        blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if (blkdev_dio_unaligned(bdev, pos, iter))
                return -EINVAL;

        if (iocb->ki_flags & IOCB_ALLOC_CACHE)
                opf |= REQ_ALLOC_CACHE;
        bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
                               &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance(). Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (is_read) {
                if (user_backed_iter(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_NOWAIT)
                bio->bi_opf |= REQ_NOWAIT;

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

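/*
 * Dispatch direct I/O to the simple synchronous, single-bio async or
 * multi-bio path depending on the iocb and the number of segments needed.
 */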
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

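/*
 * A block device maps linearly onto itself: every in-range offset is mapped
 * at the same device address, from the block-size-aligned offset up to the
 * end of the device.
 */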
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
                unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
        struct block_device *bdev = I_BDEV(inode);
        loff_t isize = i_size_read(inode);

        iomap->bdev = bdev;
        iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
        if (iomap->offset >= isize)
                return -EIO;
        iomap->type = IOMAP_MAPPED;
        iomap->addr = iomap->offset;
        iomap->length = isize - iomap->offset;
        iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
        return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
        .iomap_begin = blkdev_iomap_begin,
};

#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct blk_plug plug;
        int err;

        blk_start_plug(&plug);
        err = write_cache_pages(mapping, wbc, block_write_full_folio,
                        blkdev_get_block);
        blk_finish_plug(&plug);

        return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

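/* Address space operations for the buffer_head based page cache path. */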
const struct address_space_operations def_blk_aops = {
        .dirty_folio = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio = blkdev_read_folio,
        .readahead = blkdev_readahead,
        .writepages = blkdev_writepages,
        .write_begin = blkdev_write_begin,
        .write_end = blkdev_write_end,
        .migrate_folio = buffer_migrate_folio_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
        return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
                struct inode *inode, loff_t offset, unsigned int len)
{
        loff_t isize = i_size_read(inode);

        if (WARN_ON_ONCE(offset >= isize))
                return -EIO;
        if (offset >= wpc->iomap.offset &&
            offset < wpc->iomap.offset + wpc->iomap.length)
                return 0;
        return blkdev_iomap_begin(inode, offset, isize - offset,
                                  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
        .map_blocks = blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
        .dirty_folio = filemap_dirty_folio,
        .release_folio = iomap_release_folio,
        .invalidate_folio = iomap_invalidate_folio,
        .read_folio = blkdev_read_folio,
        .readahead = blkdev_readahead,
        .writepages = blkdev_writepages,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_folio = generic_error_remove_folio,
        .migrate_folio = filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

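/* Write back the dirty page cache range, then issue a device cache flush. */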
static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = I_BDEV(filp->f_mapping->host);
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at the file open flags and generate the corresponding block open flags
 * from them. The function works both for a file that is just being opened
 * (e.g. during the ->open callback) and for a file that is already open. This
 * is actually non-trivial (see the comment in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
        blk_mode_t mode = 0;

        if (file->f_mode & FMODE_READ)
                mode |= BLK_OPEN_READ;
        if (file->f_mode & FMODE_WRITE)
                mode |= BLK_OPEN_WRITE;
        /*
         * do_dentry_open() clears O_EXCL from f_flags; use file->private_data
         * to determine whether the open was exclusive for already open files.
         */
        if (file->private_data)
                mode |= BLK_OPEN_EXCL;
        else if (file->f_flags & O_EXCL)
                mode |= BLK_OPEN_EXCL;
        if (file->f_flags & O_NDELAY)
                mode |= BLK_OPEN_NDELAY;

        /*
         * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
         * driver has historically allowed ioctls as if the file was opened for
         * writing, but does not allow any actual reads or writes.
         */
        if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
                mode |= BLK_OPEN_WRITE_IOCTL;

        return mode;
}

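/*
 * Open the block device: convert the file flags to a blk_mode_t, record the
 * file as the holder for an exclusive open, and open the underlying bdev.
 */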
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;
        blk_mode_t mode;
        int ret;

        mode = file_to_blk_mode(filp);
        /* Use the file as the holder. */
        if (mode & BLK_OPEN_EXCL)
                filp->private_data = filp;
        ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
        if (ret)
                return ret;

        bdev = blkdev_get_no_open(inode->i_rdev);
        if (!bdev)
                return -ENXIO;

        ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
        if (ret)
                blkdev_put_no_open(bdev);
        return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
        bdev_release(filp);
        return 0;
}

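/*
 * Direct write helper: invalidate the page cache for the range, issue the
 * direct I/O, and invalidate again after a successful write so no stale
 * cached data survives.
 */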
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        ssize_t written;

        written = kiocb_invalidate_pages(iocb, count);
        if (written) {
                if (written == -EBUSY)
                        return 0;
                return written;
        }

        written = blkdev_direct_IO(iocb, from);
        if (written > 0) {
                kiocb_invalidate_post_direct_write(iocb, count);
                iocb->ki_pos += written;
                count -= written;
        }
        if (written != -EIOCBQUEUED)
                iov_iter_revert(from, count - iov_iter_count(from));
        return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
        return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct block_device *bdev = I_BDEV(file->f_mapping->host);
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        ret = file_update_time(file);
        if (ret)
                return ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = blkdev_direct_write(iocb, from);
                if (ret >= 0 && iov_iter_count(from))
                        ret = direct_write_fallback(iocb, from, ret,
                                        blkdev_buffered_write(iocb, from));
        } else {
                ret = blkdev_buffered_write(iocb, from);
        }

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        return ret;
}

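/*
 * Read from the block device: clamp the request to the device size, use
 * direct I/O when asked for, and fall through to filemap_read() for the
 * buffered remainder.
 */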
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret = 0;
        size_t count;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        count = iov_iter_count(to);
        if (!count)
                goto reexpand; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = kiocb_write_and_wait(iocb, count);
                if (ret < 0)
                        goto reexpand;
                file_accessed(iocb->ki_filp);

                ret = blkdev_direct_IO(iocb, to);
                if (ret >= 0) {
                        iocb->ki_pos += ret;
                        count -= ret;
                }
                iov_iter_revert(to, count - iov_iter_count(to));
                if (ret < 0 || !count)
                        goto reexpand;
        }

        ret = filemap_read(iocb, to, ret);

reexpand:
        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

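/*
 * fallocate() on a block device: a zero range maps to blkdev_issue_zeroout(),
 * punch hole to zeroing without fallback and, with NO_HIDE_STALE, to a
 * discard; the affected page cache range is invalidated first.  For example
 * (userspace, illustrative only):
 *
 *      fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE |
 *                FALLOC_FL_NO_HIDE_STALE, start, len);
 *
 * ends up in blkdev_issue_discard() below.
 */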
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /*
         * Invalidate the page cache, including dirty pages, for valid
         * de-allocate mode calls to fallocate().
         */
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
                if (error)
                        goto fail;

                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *bd_inode = bdev_file_inode(file);

        if (bdev_read_only(I_BDEV(bd_inode)))
                return generic_file_readonly_mmap(file, vma);

        return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
        .open = blkdev_open,
        .release = blkdev_release,
        .llseek = blkdev_llseek,
        .read_iter = blkdev_read_iter,
        .write_iter = blkdev_write_iter,
        .iopoll = iocb_bio_iopoll,
        .mmap = blkdev_mmap,
        .fsync = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = compat_blkdev_ioctl,
#endif
        .splice_read = filemap_splice_read,
        .splice_write = iter_file_splice_write,
        .fallocate = blkdev_fallocate,
        .fop_flags = FOP_BUFFER_RASYNC,
};

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);