/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page, and, most importantly, take filesystem specific locks on a
 * per-page basis. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
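
/*
 * For illustration: filesystems plug into iomap_apply() purely through the
 * iomap_begin/iomap_end methods. A minimal sketch of an ops table (all
 * "myfs_*" names are hypothetical, not part of this file):
 *
 *	static int myfs_iomap_begin(struct inode *inode, loff_t pos,
 *			loff_t length, unsigned flags, struct iomap *iomap)
 *	{
 *		// look up (or, for IOMAP_WRITE, allocate) the extent at pos
 *		// and fill out iomap->type/addr/offset/length/bdev
 *		return 0;
 *	}
 *
 *	const struct iomap_ops myfs_iomap_ops = {
 *		.iomap_begin	= myfs_iomap_begin,
 *	};
 *
 * iomap_end is optional, as the NULL check above shows.
 */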

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

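/*
 * Prepare a locked, instantiated page cache page for a buffered write. On
 * failure the page is released, and any pagecache newly created beyond the
 * old EOF is truncated away again via iomap_write_failed().
 */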
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

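/*
 * Commit a buffered write to a single page. generic_write_end() reports how
 * many bytes actually made it to the page; a short copy additionally trims
 * pagecache beyond EOF via iomap_write_failed().
 */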
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
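
/*
 * A sketch of how a filesystem's ->write_iter might drive the helper above
 * (hypothetical "myfs" names; real callers also handle O_DIRECT, locking
 * order and filesystem specific error paths):
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */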

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

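/*
 * Read each page in the range so it is fully uptodate, then cycle it through
 * write_begin/write_end unmodified so that it is redirtied against the iomap
 * the filesystem just returned; useful for callers that need existing data
 * rewritten against a new mapping.
 */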
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = (iomap->addr +
			   (pos & PAGE_MASK) - iomap->offset) >> 9;

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
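
/*
 * A rough sketch of intended use (hypothetical "myfs" names): a truncate
 * implementation typically zeroes the tail of the now-last block before
 * shrinking the file, so stale data is not exposed if the file later grows
 * back:
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */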

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
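
/*
 * A filesystem would normally reach this from its vm_operations_struct, for
 * example (hypothetical "myfs" names; the fault path usually also takes
 * whatever filesystem lock serialises against truncate):
 *
 *	static int myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */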

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;
	if (iomap->flags & IOMAP_F_DATA_INLINE)
		flags |= FIEMAP_EXTENT_DATA_INLINE;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
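
/*
 * Wiring this up is a one-liner in the filesystem's inode operations, e.g.
 * (hypothetical "myfs" names):
 *
 *	static int myfs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fi, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
 *	}
 */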

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
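
/*
 * Together these back SEEK_HOLE/SEEK_DATA in a filesystem's ->llseek, e.g.
 * (hypothetical "myfs" names):
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */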

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb *iocb;
	iomap_dio_end_io_t *end_io;
	loff_t i_size;
	loff_t size;
	atomic_t ref;
	unsigned flags;
	int error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter *iter;
			struct task_struct *waiter;
			struct request_queue *last_queue;
			blk_qc_t cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet. We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector =
		(iomap->addr + pos - iomap->offset) >> 9;
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW) {
			need_zeroout = true;
		} else {
			/*
			 * Use a FUA write if we need datasync semantics, this
			 * is a pure data IO that doesn't require any metadata
			 * updates and the underlying device supports FUA. This
			 * allows us to avoid cache flushes on IO completion.
			 */
			if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
			    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
			    blk_queue_fua(bdev_get_queue(iomap->bdev)))
				use_fua = true;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;

		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector =
			(iomap->addr + pos - iomap->offset) >> 9;
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not. This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write. This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes. In that case, we still need to do a full
 * data sync completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO. Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing. If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
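
/*
 * A sketch of a direct read path built on this helper (hypothetical "myfs"
 * names; writes look similar but take i_rwsem exclusive and pass an end_io
 * callback if unwritten extents need conversion at completion):
 *
 *	static ssize_t myfs_dio_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */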

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file. Physical extents reported to
 * the swap code must be trimmed to align to a page boundary. The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count. The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	/* Skip holes. */
	if (iomap->type == IOMAP_HOLE)
		goto out;

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev)
		goto err;

	/* Only real or unwritten extents. */
	if (iomap->type != IOMAP_MAPPED && iomap->type != IOMAP_UNWRITTEN)
		goto err;

	/* No uncommitted metadata or shared blocks or inline data. */
	if (iomap->flags & (IOMAP_F_DIRTY | IOMAP_F_SHARED |
			    IOMAP_F_DATA_INLINE))
		goto err;

	/* No null physical addresses. */
	if (iomap->addr == IOMAP_NULL_ADDR)
		goto err;

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
out:
	return count;
err:
	pr_err("swapon: file cannot be used for swap\n");
	return -EINVAL;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
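
/*
 * Filesystems hook this up via address_space_operations, e.g. (hypothetical
 * "myfs" names):
 *
 *	static int myfs_iomap_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&myfs_iomap_ops);
 *	}
 *
 * wired into the filesystem's a_ops as .swap_activate.
 */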
#endif /* CONFIG_SWAP */