// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

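/*
 * For a block device the block mapping is the identity: logical block
 * iblock of the file is block iblock of the device, so the buffer can
 * be marked mapped without allocating anything (the create argument
 * is ignored).
 */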
static int blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

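/*
 * Build the operation flags for a direct write.  Marking O_DSYNC
 * writes as REQ_FUA makes the data durable when the write itself
 * completes, so no separate flush, and no blocking completion work
 * item to issue one from, is required.
 */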
static blk_opf_t dio_bio_write_op(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;
	return opf;
}

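/*
 * Direct I/O to a block device must be aligned to the logical block
 * size, both in file offset and in the memory addresses and lengths
 * of the iovec.  For example, with a 512-byte logical block size an
 * O_DIRECT request at pos 4096 passes (4096 & 511 == 0), while one at
 * pos 4100 is rejected with -EINVAL before any bio is built.
 */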
static bool blkdev_dio_unaligned(struct block_device *bdev, loff_t pos,
		struct iov_iter *iter)
{
	return pos & (bdev_logical_block_size(bdev) - 1) ||
		!bdev_iter_is_aligned(bdev, iter);
}

#define DIO_INLINE_BIO_VECS 4

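/*
 * Fast path for synchronous direct I/O that fits in a single bio: the
 * bio and, for small requests, its bio_vec array live on the stack,
 * the bio is submitted with submit_bio_wait(), and no completion
 * handler or reference counting is needed.
 */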
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		struct iov_iter *iter, unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	if (iov_iter_rw(iter) == READ) {
		bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
		if (user_backed_iter(iter))
			should_dirty = true;
	} else {
		bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
	}
	bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == WRITE)
		task_io_account_write(ret);

	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;

	submit_bio_wait(&bio);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

enum {
	DIO_SHOULD_DIRTY	= 1,
	DIO_IS_SYNC		= 2,
};

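/*
 * Per-request state for the multi-bio and async paths.  Synchronous
 * requests (DIO_IS_SYNC) store the sleeping task in the union so the
 * completion can wake it; async requests store the iocb to complete.
 * The structure is allocated together with the first bio via the
 * front pad of blkdev_dio_pool, hence the embedded bio must be the
 * last member.
 */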
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	unsigned int		flags;
	struct bio		bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

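/*
 * Completion handler shared by all bios of a multi-bio request.  The
 * first error status seen is recorded in the parent dio, and only the
 * final reference drop completes the iocb (async) or wakes the waiter
 * (sync).
 */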
static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (atomic_dec_and_test(&dio->ref)) {
		if (!(dio->flags & DIO_IS_SYNC)) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			WRITE_ONCE(iocb->private, NULL);

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret);
			bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

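/*
 * Slow path for requests spanning more than BIO_MAX_VECS pages: the
 * iterator is carved into a chain of bios submitted under a single
 * plug, with dio->ref counting the bios still in flight.
 */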
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	atomic_set(&dio->ref, 1);
	/*
	 * Grab an extra reference to ensure the dio structure, which is
	 * embedded into the first bio, stays around.
	 */
	bio_get(bio);

	is_sync = is_sync_kiocb(iocb);
	if (is_sync) {
		dio->flags = DIO_IS_SYNC;
		dio->waiter = current;
	} else {
		dio->flags = 0;
		dio->iocb = iocb;
	}

	dio->size = 0;
	if (is_read && user_backed_iter(iter))
		dio->flags |= DIO_SHOULD_DIRTY;

	blk_start_plug(&plug);

	for (;;) {
		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}
		if (iocb->ki_flags & IOCB_NOWAIT) {
			/*
			 * This is nonblocking IO, and we need to allocate
			 * another bio if we have data left to map.  As we
			 * cannot guarantee that one of the sub-bios will not
			 * fail to be issued for NOWAIT, and as error results
			 * are coalesced across all of them, be safe and ask
			 * for a retry of this from blocking context.
			 */
			if (unlikely(iov_iter_count(iter))) {
				bio_release_pages(bio, false);
				bio_clear_flag(bio, BIO_REFFED);
				bio_put(bio);
				blk_finish_plug(&plug);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		if (is_read) {
			if (dio->flags & DIO_SHOULD_DIRTY)
				bio_set_pages_dirty(bio);
		} else {
			task_io_account_write(bio->bi_iter.bi_size);
		}
		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
		if (!nr_pages) {
			submit_bio(bio);
			break;
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

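/*
 * Completion handler for the single-bio async path.  Exactly one bio
 * exists, so no reference counting is needed and the iocb can be
 * completed directly from the bio's end_io.
 */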
static void blkdev_bio_end_io_async(struct bio *bio)
{
	struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	WRITE_ONCE(iocb->private, NULL);

	if (likely(!bio->bi_status)) {
		ret = dio->size;
		iocb->ki_pos += ret;
	} else {
		ret = blk_status_to_errno(bio->bi_status);
	}

	iocb->ki_complete(iocb, ret);

	if (dio->flags & DIO_SHOULD_DIRTY) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

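/*
 * Fast path for asynchronous direct I/O that fits in a single bio.
 * Supports polled completions (IOCB_HIPRI) and, for bvec iterators,
 * skips page pinning entirely by pointing the bio at the caller's
 * bio_vec array.
 */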
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
					struct iov_iter *iter,
					unsigned int nr_pages)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	bool is_read = iov_iter_rw(iter) == READ;
	blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
	struct blkdev_dio *dio;
	struct bio *bio;
	loff_t pos = iocb->ki_pos;
	int ret = 0;

	if (blkdev_dio_unaligned(bdev, pos, iter))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_ALLOC_CACHE)
		opf |= REQ_ALLOC_CACHE;
	bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,
			       &blkdev_dio_pool);
	dio = container_of(bio, struct blkdev_dio, bio);
	dio->flags = 0;
	dio->iocb = iocb;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	bio->bi_end_io = blkdev_bio_end_io_async;
	bio->bi_ioprio = iocb->ki_ioprio;

	if (iov_iter_is_bvec(iter)) {
		/*
		 * Users don't rely on the iterator being in any particular
		 * state for async I/O returning -EIOCBQUEUED, hence we can
		 * avoid expensive iov_iter_advance(). Bypass
		 * bio_iov_iter_get_pages() and set the bvec directly.
		 */
		bio_iov_bvec_set(bio, iter);
	} else {
		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}
	}
	dio->size = bio->bi_iter.bi_size;

	if (is_read) {
		if (user_backed_iter(iter)) {
			dio->flags |= DIO_SHOULD_DIRTY;
			bio_set_pages_dirty(bio);
		}
	} else {
		task_io_account_write(bio->bi_iter.bi_size);
	}

	if (iocb->ki_flags & IOCB_HIPRI) {
		bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
		submit_bio(bio);
		WRITE_ONCE(iocb->private, bio);
	} else {
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;
		submit_bio(bio);
	}
	return -EIOCBQUEUED;
}

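/*
 * Dispatch to one of the three direct I/O implementations.  Probing
 * the iterator with a limit of BIO_MAX_VECS + 1 distinguishes "fits
 * in one bio" from "needs the multi-bio slow path" without counting
 * every segment of a huge request.
 */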
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int nr_pages;

	if (!iov_iter_count(iter))
		return 0;

	nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
	if (likely(nr_pages <= BIO_MAX_VECS)) {
		if (is_sync_kiocb(iocb))
			return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
		return __blkdev_direct_IO_async(iocb, iter, nr_pages);
	}
	return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, pagep, blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	int ret;

	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

const struct address_space_operations def_blk_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= blkdev_read_folio,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.direct_IO	= blkdev_direct_IO,
	.migrate_folio	= buffer_migrate_folio_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so the
 * size is computed by hand from the block device instead.
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
		int datasync)
{
	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}

blk_mode_t file_to_blk_mode(struct file *file)
{
	blk_mode_t mode = 0;

	if (file->f_mode & FMODE_READ)
		mode |= BLK_OPEN_READ;
	if (file->f_mode & FMODE_WRITE)
		mode |= BLK_OPEN_WRITE;
	if (file->private_data)
		mode |= BLK_OPEN_EXCL;
	if (file->f_flags & O_NDELAY)
		mode |= BLK_OPEN_NDELAY;

	/*
	 * If all bits in O_ACCMODE are set (aka O_RDWR | O_WRONLY), the
	 * floppy driver has historically allowed ioctls as if the file
	 * was opened for writing, but does not allow any actual reads or
	 * writes.
	 */
	if ((file->f_flags & O_ACCMODE) == (O_RDWR | O_WRONLY))
		mode |= BLK_OPEN_WRITE_IOCTL;

	return mode;
}

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly.  Some mkfs
	 * binaries need it.  We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;
	filp->f_mode |= FMODE_BUF_RASYNC;

	/*
	 * Use the file private data to store the holder for exclusive
	 * opens.  file_to_blk_mode() relies on it being present to set
	 * BLK_OPEN_EXCL.
	 */
	if (filp->f_flags & O_EXCL)
		filp->private_data = filp;

	bdev = blkdev_get_by_dev(inode->i_rdev, file_to_blk_mode(filp),
				 filp->private_data, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	if (bdev_nowait(bdev))
		filp->f_mode |= FMODE_NOWAIT;

	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	return 0;
}

static int blkdev_release(struct inode *inode, struct file *filp)
{
	blkdev_put(I_BDEV(filp->f_mapping->host), filp->private_data);
	return 0;
}

/*
 * Write data to the block device.  Only intended for the block device
 * itself and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general
 * purpose use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	struct inode *bd_inode = bdev->bd_inode;
	loff_t size = bdev_nr_bytes(bdev);
	size_t shorted = 0;
	ssize_t ret;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	size -= iocb->ki_pos;
	if (iov_iter_count(from) > size) {
		shorted = iov_iter_count(from) - size;
		iov_iter_truncate(from, size);
	}

	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	iov_iter_reexpand(from, iov_iter_count(from) + shorted);
	return ret;
}

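/*
 * Reads past the device size are clamped by temporarily truncating
 * the iterator, which is re-expanded before returning so the caller
 * sees the iterator state it expects.  Direct I/O that completes only
 * partially falls back to buffered reading via filemap_read() for the
 * remainder.
 */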
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
	loff_t size = bdev_nr_bytes(bdev);
	loff_t pos = iocb->ki_pos;
	size_t shorted = 0;
	ssize_t ret = 0;
	size_t count;

	if (unlikely(pos + iov_iter_count(to) > size)) {
		if (pos >= size)
			return 0;
		size -= pos;
		shorted = iov_iter_count(to) - size;
		iov_iter_truncate(to, size);
	}

	count = iov_iter_count(to);
	if (!count)
		goto reexpand; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = kiocb_write_and_wait(iocb, count);
		if (ret < 0)
			goto reexpand;
		file_accessed(iocb->ki_filp);

		ret = blkdev_direct_IO(iocb, to);
		if (ret >= 0) {
			iocb->ki_pos += ret;
			count -= ret;
		}
		iov_iter_revert(to, count - iov_iter_count(to));
		if (ret < 0 || !count)
			goto reexpand;
	}

	ret = filemap_read(iocb, to, ret);

reexpand:
	if (unlikely(shorted))
		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
	return ret;
}

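/*
 * fallocate() on a block device cannot change its size; the supported
 * modes map onto zeroing without unmapping (FALLOC_FL_ZERO_RANGE),
 * zeroing that must be offloaded to the device with no write fallback
 * (plain punch hole), and a plain discard that may leave stale data
 * (punch hole with FALLOC_FL_NO_HIDE_STALE).
 */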
#define BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = bdev_nr_bytes(bdev);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
	if (error)
		goto fail;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL,
					     BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
					     len >> SECTOR_SHIFT, GFP_KERNEL);
		break;
	default:
		error = -EOPNOTSUPP;
	}

fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return error;
}

static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);

	if (bdev_read_only(I_BDEV(bd_inode)))
		return generic_file_readonly_mmap(file, vma);

	return generic_file_mmap(file, vma);
}

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_release,
	.llseek		= blkdev_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.mmap		= blkdev_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= blkdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

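/*
 * The dio pool front-pads each bio with the rest of struct blkdev_dio
 * (offsetof(struct blkdev_dio, bio)), so a single mempool-backed
 * allocation yields both the bio and its per-request state.
 */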
static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			   offsetof(struct blkdev_dio, bio),
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);