// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/module.h>
#include <linux/io_uring/cmd.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

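/*
 * Build the bio op flags for a direct write.  REQ_SYNC | REQ_IDLE marks
 * the write as synchronously submitted, and for datasync-style writes
 * REQ_FUA asks the device to make the data durable at completion, so no
 * separate flush needs to be issued afterwards.
 */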
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

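/*
 * Direct I/O to a block device must be expressible in whole logical
 * blocks: e.g. with a 4096-byte logical block size an iocb at pos 512 is
 * rejected, since 512 & (4096 - 1) != 0.  Atomic writes additionally
 * have to pass generic_atomic_write_valid().
 */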
static bool blkdev_dio_invalid(struct block_device *bdev, loff_t pos,
				struct iov_iter *iter, bool is_atomic)
{
	if (is_atomic && !generic_atomic_write_valid(iter, pos))
		return true;

	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

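/*
 * Fast path for synchronous direct I/O that fits in a single bio: the
 * bio lives on the stack and submit_bio_wait() blocks until it
 * completes, so no blkdev_dio state or end_io handler is needed.  Only
 * the bio_vec array may require a heap allocation.
 */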
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, struct block_device *bdev,
		unsigned int nr_pages)
{
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio.bi_ioprio = iocb->ki_ioprio;
	if (iocb->ki_flags & IOCB_ATOMIC)
		bio.bi_opf |= REQ_ATOMIC;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

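/*
 * Per-request state shared by all bios of a direct I/O request.  The
 * union holds the iocb for async requests or the submitting task for
 * sync ones (distinguished by DIO_IS_SYNC), @ref counts the bios still
 * in flight, and the structure itself is embedded in front of the first
 * bio allocated from blkdev_dio_pool.
 */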
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

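/*
 * Slow path for requests that need more than BIO_MAX_VECS segments: the
 * request is split into a chain of bios submitted under a single plug.
 * All bios share one blkdev_dio, and blkdev_bio_end_io() either
 * completes the iocb or wakes the sync waiter once the last reference
 * is dropped.
 */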
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		struct block_device *bdev, unsigned int nr_pages)
{
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure which is
	 * embedded into the first bio stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map. As we
			 * cannot guarantee that one of the sub bios will not
			 * fail getting issued FOR NOWAIT and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

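/* Completion handler for the single-bio async fast path below. */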
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

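/*
 * Async fast path for requests that fit in a single bio.  bvec
 * iterators skip bio_iov_iter_get_pages() entirely, and IOCB_HIPRI
 * submissions are marked REQ_POLLED with the bio stashed in
 * iocb->private for iocb_bio_iopoll() to find.
 */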
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					struct block_device *bdev,
					unsigned int nr_pages)
{
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_write_hint = file_inode(iocb->ki_filp)->i_write_hint;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_ATOMIC)
		bio->bi_opf |= REQ_ATOMIC;

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

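/*
 * Dispatch a direct I/O request: anything that fits in a single bio
 * takes the simple (sync) or async fast path, everything larger falls
 * back to the multi-bio slow path.  Atomic writes must fit in one bio.
 */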
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_atomic = iocb->ki_flags & IOCB_ATOMIC;
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	if (blkdev_dio_invalid(bdev, iocb->ki_pos, iter, is_atomic))
		return -EINVAL;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, bdev,
							nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, bdev, nr_pages);
	} else if (is_atomic) {
		return -EINVAL;
	}
	return __blkdev_direct_IO(iocb, iter, bdev, bio_max_segs(nr_pages));
}

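/*
 * A block device maps 1:1 onto itself, so a single extent describes
 * everything from the (block-aligned) offset to the end of the device:
 * iomap->addr equals iomap->offset and the length runs to i_size.
 */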
static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = I_BDEV(inode);
	loff_t isize = i_size_read(inode);

	if (offset >= isize)
		return -EIO;

	iomap->bdev = bdev;
	iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
	iomap->type = IOMAP_MAPPED;
	iomap->addr = iomap->offset;
	iomap->length = isize - iomap->offset;
	iomap->flags |= IOMAP_F_BUFFER_HEAD; /* noop for !CONFIG_BUFFER_HEAD */
	return 0;
}

static const struct iomap_ops blkdev_iomap_ops = {
	.iomap_begin		= blkdev_iomap_begin,
};

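/*
 * Two sets of address_space operations exist: a legacy buffer_head
 * based implementation and an iomap based one, selected at build time
 * via CONFIG_BUFFER_HEAD.
 */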
#ifdef CONFIG_BUFFER_HEAD
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

/*
 * We cannot call mpage_writepages() as it does not take the buffer lock.
 * We must use block_write_full_folio() directly which holds the buffer
 * lock.  The buffer lock provides the synchronisation with writeback
 * that filesystems rely on when they use the blockdev's mapping.
 */
static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, block_write_full_folio,
			blkdev_get_block);
	blk_finish_plug(&plug);

	return err;
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
	return block_write_begin(mapping, pos, len, foliop, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct folio *folio,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, folio, fsdata);

	folio_unlock(folio);
	folio_put(folio);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepages	= blkdev_writepages,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};
#else /* CONFIG_BUFFER_HEAD */
static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &blkdev_iomap_ops);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &blkdev_iomap_ops);
}

static int blkdev_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset, unsigned int len)
{
	loff_t isize = i_size_read(inode);

	if (WARN_ON_ONCE(offset >= isize))
		return -EIO;
	if (offset >= wpc->iomap.offset &&
	    offset < wpc->iomap.offset + wpc->iomap.length)
		return 0;
	return blkdev_iomap_begin(inode, offset, isize - offset,
				  IOMAP_WRITE, &wpc->iomap, NULL);
}

static const struct iomap_writeback_ops blkdev_writeback_ops = {
	.map_blocks		= blkdev_map_blocks,
};

static int blkdev_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct iomap_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc, &blkdev_writeback_ops);
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.read_folio		= blkdev_read_folio,
	.readahead		= blkdev_readahead,
	.writepages		= blkdev_writepages,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.migrate_folio		= filemap_migrate_folio,
};
#endif /* CONFIG_BUFFER_HEAD */

/*
 * For a block special file, file_inode(file)->i_size is zero, so we
 * compute the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

/**
 * file_to_blk_mode - get block open flags from file flags
 * @file: file whose open flags should be converted
 *
 * Look at file open flags and generate corresponding block open flags from
 * them. The function works both for a file that is just being opened (e.g.
 * during the ->open callback) and for a file that is already open. This is
 * actually non-trivial (see the comments in the function).
 */
blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	/*
	 * do_dentry_open() clears O_EXCL from f_flags, use file->private_data
	 * to determine whether the open was exclusive for already open files.
	 */
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	else if (file->f_flags & O_EXCL)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the floppy
	 * driver has historically allowed ioctls as if the file was opened for
	 * writing, but does not allow any actual reads or writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

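/*
 * ->open checks permissions before taking a reference on the bdev.
 * FMODE_CAN_ATOMIC_WRITE is only granted to O_DIRECT opens, as atomic
 * writes are only supported via direct I/O.
 */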
static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;
	blk_mode_t mode;
	int ret;

	mode = file_to_blk_mode(filp);
	/* Use the file as the holder. */
	if (mode & BLK_OPEN_EXCL)
		filp->private_data = filp;
	ret = bdev_permission(inode->i_rdev, mode, filp->private_data);
	if (ret)
		return ret;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return -ENXIO;

	if (bdev_can_atomic_write(bdev) && filp->f_flags & O_DIRECT)
		filp->f_mode |= FMODE_CAN_ATOMIC_WRITE;

	ret = bdev_open(bdev, mode, filp->private_data, NULL, filp);
	if (ret)
		blkdev_put_no_open(bdev);
	return ret;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	bdev_release(filp);
	return 0;
}

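/*
 * O_DIRECT write path: invalidate the page cache over the range first,
 * falling back to buffered I/O (by returning 0) if invalidation fails
 * with -EBUSY, and revert the iterator for any bytes that were not
 * written unless the I/O was queued asynchronously.
 */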
static ssize_t
blkdev_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);
	ssize_t written;

	written = kiocb_invalidate_pages(iocb, count);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = blkdev_direct_IO(iocb, from);
	if (written > 0) {
		kiocb_invalidate_post_direct_write(iocb, count);
		iocb->ki_pos += written;
		count -= written;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, count - iov_iter_count(from));
	return written;
}

static ssize_t blkdev_buffered_write(struct kiocb *iocb, struct iov_iter *from)
{
	return iomap_file_buffered_write(iocb, from, &blkdev_iomap_ops, NULL);
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = file_update_time(file);
	if (ret)
		return ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = blkdev_direct_write(iocb, from);
		if (ret >= 0 && iov_iter_count(from))
			ret = direct_write_fallback(iocb, from, ret,
					blkdev_buffered_write(iocb, from));
	} else {
		ret = blkdev_buffered_write(iocb, from);
	}

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

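/*
 * fallocate() on a block device is implemented via blkdev_issue_zeroout():
 * FALLOC_FL_ZERO_RANGE zeroes the range without deallocating it
 * (BLKDEV_ZERO_NOUNMAP), while punching a hole fails instead of falling
 * back to explicitly writing zeroes (BLKDEV_ZERO_NOFALLBACK).
 */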
#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/*
	 * Invalidate the page cache, including dirty pages, for valid
	 * de-allocate mode calls to fallocate().
	 */
	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file),
					    start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = truncate_bdev_range(bdev, file_to_blk_mode(file),
					    start, end);
		if (error)
			goto fail;

		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	default:
		error = -EOPNOTSUPP;
	}

 fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
	.uring_cmd	= blkdev_uring_cmd,
	.fop_flags	= FOP_BUFFER_RASYNC,
};

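/*
 * blkdev_dio_pool front-pads each bio with struct blkdev_dio (note the
 * offsetof()), which is what makes the container_of() in the direct
 * I/O paths work.
 */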
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
				offsetof(struct blkdev_dio, bio),
				BIOSET_NEED_BVECS|BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);