fs: move page_cache_seek_hole_data to iomap.c
[linux-block.git] / fs / iomap.c
1/*
2 * Copyright (C) 2010 Red Hat, Inc.
3 * Copyright (c) 2016 Christoph Hellwig.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/module.h>
15#include <linux/compiler.h>
16#include <linux/fs.h>
17#include <linux/iomap.h>
18#include <linux/uaccess.h>
19#include <linux/gfp.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/pagemap.h>
8a78cb1f 23#include <linux/pagevec.h>
24#include <linux/file.h>
25#include <linux/uio.h>
26#include <linux/backing-dev.h>
27#include <linux/buffer_head.h>
ff6a9292 28#include <linux/task_io_accounting_ops.h>
9a286f0e 29#include <linux/dax.h>
f361bf4a 30#include <linux/sched/signal.h>
67482129 31#include <linux/swap.h>
f361bf4a 32
33#include "internal.h"
34
35/*
 36 * Execute an iomap write on a segment of the mapping that spans a
 37 * contiguous range of pages that have identical block mapping state.
 38 *
 39 * This avoids the need to map pages individually, do individual allocations
 40 * for each page and, most importantly, avoids the need for filesystem-specific
41 * locking per page. Instead, all the operations are amortised over the entire
42 * range of pages. It is assumed that the filesystems will lock whatever
43 * resources they require in the iomap_begin call, and release them in the
44 * iomap_end call.
45 */
befb503c 46loff_t
ae259a9c 47iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
8ff6daa1 48 const struct iomap_ops *ops, void *data, iomap_actor_t actor)
49{
50 struct iomap iomap = { 0 };
51 loff_t written = 0, ret;
52
53 /*
54 * Need to map a range from start position for length bytes. This can
55 * span multiple pages - it is only guaranteed to return a range of a
56 * single type of pages (e.g. all into a hole, all mapped or all
57 * unwritten). Failure at this point has nothing to undo.
58 *
59 * If allocation is required for this range, reserve the space now so
60 * that the allocation is guaranteed to succeed later on. Once we copy
61 * the data into the page cache pages, then we cannot fail otherwise we
62 * expose transient stale data. If the reserve fails, we can safely
63 * back out at this point as there is nothing to undo.
64 */
65 ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
66 if (ret)
67 return ret;
68 if (WARN_ON(iomap.offset > pos))
69 return -EIO;
70 if (WARN_ON(iomap.length == 0))
71 return -EIO;
72
73 /*
74 * Cut down the length to the one actually provided by the filesystem,
75 * as it might not be able to give us the whole size that we requested.
76 */
77 if (iomap.offset + iomap.length < pos + length)
78 length = iomap.offset + iomap.length - pos;
79
80 /*
 81 * Now that we have guaranteed that the space allocation will succeed,
82 * we can do the copy-in page by page without having to worry about
83 * failures exposing transient data.
84 */
85 written = actor(inode, pos, length, data, &iomap);
86
87 /*
88 * Now the data has been copied, commit the range we've copied. This
89 * should not fail unless the filesystem has had a fatal error.
90 */
91 if (ops->iomap_end) {
92 ret = ops->iomap_end(inode, pos, length,
93 written > 0 ? written : 0,
94 flags, &iomap);
95 }
96
97 return written ? written : ret;
98}
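/*
 * Illustrative sketch only (not lifted from any particular filesystem): a
 * typical ->iomap_begin implementation fills in the passed-in iomap roughly
 * along the lines of
 *
 *	iomap->offset = <start of the extent containing pos>;
 *	iomap->length = <length of that extent>;
 *	iomap->addr   = <disk address, or IOMAP_NULL_ADDR for a hole>;
 *	iomap->type   = IOMAP_MAPPED, IOMAP_HOLE, IOMAP_UNWRITTEN, ...;
 *	iomap->bdev   = <backing block device>;
 *
 * iomap_apply() then trims the caller's range to that mapping, hands it to
 * the actor, and reports the number of bytes processed to ->iomap_end.
 */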
99
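/*
 * Translate a position in the file into a 512-byte sector number on the
 * backing device described by @iomap.
 */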
100static sector_t
101iomap_sector(struct iomap *iomap, loff_t pos)
102{
103 return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
104}
105
106static void
107iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
108{
109 loff_t i_size = i_size_read(inode);
110
111 /*
 112 * Only truncate newly allocated pages beyond EOF, even if the
113 * write started inside the existing inode size.
114 */
115 if (pos + len > i_size)
116 truncate_pagecache_range(inode, max(pos, i_size), pos + len);
117}
118
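/*
 * Grab and lock a page cache page for the range being written and use
 * __block_write_begin_int() to bring any blocks that are only partially
 * overwritten up to date.  On failure the page is released and page cache
 * newly instantiated beyond EOF is truncated again.
 */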
119static int
120iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
121 struct page **pagep, struct iomap *iomap)
122{
123 pgoff_t index = pos >> PAGE_SHIFT;
124 struct page *page;
125 int status = 0;
126
127 BUG_ON(pos + len > iomap->offset + iomap->length);
128
129 if (fatal_signal_pending(current))
130 return -EINTR;
131
132 page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
133 if (!page)
134 return -ENOMEM;
135
136 status = __block_write_begin_int(page, pos, len, NULL, iomap);
137 if (unlikely(status)) {
138 unlock_page(page);
139 put_page(page);
140 page = NULL;
141
142 iomap_write_failed(inode, pos, len);
143 }
144
145 *pagep = page;
146 return status;
147}
148
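/*
 * Commit a copied range through generic_write_end().  A short commit trims
 * any page cache that was instantiated beyond EOF via iomap_write_failed().
 */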
149static int
150iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
151 unsigned copied, struct page *page)
152{
153 int ret;
154
155 ret = generic_write_end(NULL, inode->i_mapping, pos, len,
156 copied, page, NULL);
157 if (ret < len)
158 iomap_write_failed(inode, pos, len);
159 return ret;
160}
161
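/*
 * Copy data from the iov_iter passed in @data into the page cache, one page
 * at a time, for the range mapped by iomap_apply().  If the atomic user copy
 * makes no progress, fall back to a single-segment retry to avoid livelock.
 * Returns the number of bytes written, or a negative error if nothing could
 * be copied.
 */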
162static loff_t
163iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
164 struct iomap *iomap)
165{
166 struct iov_iter *i = data;
167 long status = 0;
168 ssize_t written = 0;
169 unsigned int flags = AOP_FLAG_NOFS;
170
171 do {
172 struct page *page;
173 unsigned long offset; /* Offset into pagecache page */
174 unsigned long bytes; /* Bytes to write to page */
175 size_t copied; /* Bytes copied from user */
176
177 offset = (pos & (PAGE_SIZE - 1));
178 bytes = min_t(unsigned long, PAGE_SIZE - offset,
179 iov_iter_count(i));
180again:
181 if (bytes > length)
182 bytes = length;
183
184 /*
185 * Bring in the user page that we will copy from _first_.
186 * Otherwise there's a nasty deadlock on copying from the
187 * same page as we're writing to, without it being marked
188 * up-to-date.
189 *
190 * Not only is this an optimisation, but it is also required
191 * to check that the address is actually valid, when atomic
192 * usercopies are used, below.
193 */
194 if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
195 status = -EFAULT;
196 break;
197 }
198
199 status = iomap_write_begin(inode, pos, bytes, flags, &page,
200 iomap);
201 if (unlikely(status))
202 break;
203
204 if (mapping_writably_mapped(inode->i_mapping))
205 flush_dcache_page(page);
206
ae259a9c 207 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
208
209 flush_dcache_page(page);
210
211 status = iomap_write_end(inode, pos, bytes, copied, page);
212 if (unlikely(status < 0))
213 break;
214 copied = status;
215
216 cond_resched();
217
218 iov_iter_advance(i, copied);
219 if (unlikely(copied == 0)) {
220 /*
221 * If we were unable to copy any data at all, we must
222 * fall back to a single segment length write.
223 *
224 * If we didn't fallback here, we could livelock
225 * because not all segments in the iov can be copied at
226 * once without a pagefault.
227 */
228 bytes = min_t(unsigned long, PAGE_SIZE - offset,
229 iov_iter_single_seg_count(i));
230 goto again;
231 }
232 pos += copied;
233 written += copied;
234 length -= copied;
235
236 balance_dirty_pages_ratelimited(inode->i_mapping);
237 } while (iov_iter_count(i) && length);
238
239 return written ? written : status;
240}
241
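/*
 * Buffered write entry point for iomap based filesystems: map the range
 * starting at iocb->ki_pos one extent at a time and copy the iter's data
 * into the page cache.  iocb->ki_pos itself is not advanced here.
 */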
242ssize_t
243iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
8ff6daa1 244 const struct iomap_ops *ops)
245{
246 struct inode *inode = iocb->ki_filp->f_mapping->host;
247 loff_t pos = iocb->ki_pos, ret = 0, written = 0;
248
249 while (iov_iter_count(iter)) {
250 ret = iomap_apply(inode, pos, iov_iter_count(iter),
251 IOMAP_WRITE, ops, iter, iomap_write_actor);
252 if (ret <= 0)
253 break;
254 pos += ret;
255 written += ret;
256 }
257
258 return written ? written : ret;
259}
260EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
261
262static struct page *
263__iomap_read_page(struct inode *inode, loff_t offset)
264{
265 struct address_space *mapping = inode->i_mapping;
266 struct page *page;
267
268 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
269 if (IS_ERR(page))
270 return page;
271 if (!PageUptodate(page)) {
272 put_page(page);
273 return ERR_PTR(-EIO);
274 }
275 return page;
276}
277
278static loff_t
279iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
280 struct iomap *iomap)
281{
282 long status = 0;
283 ssize_t written = 0;
284
285 do {
286 struct page *page, *rpage;
287 unsigned long offset; /* Offset into pagecache page */
288 unsigned long bytes; /* Bytes to write to page */
289
290 offset = (pos & (PAGE_SIZE - 1));
e28ae8e4 291 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
292
293 rpage = __iomap_read_page(inode, pos);
294 if (IS_ERR(rpage))
295 return PTR_ERR(rpage);
296
297 status = iomap_write_begin(inode, pos, bytes,
c718a975 298 AOP_FLAG_NOFS, &page, iomap);
299 put_page(rpage);
300 if (unlikely(status))
301 return status;
302
303 WARN_ON_ONCE(!PageUptodate(page));
304
305 status = iomap_write_end(inode, pos, bytes, bytes, page);
306 if (unlikely(status <= 0)) {
307 if (WARN_ON_ONCE(status == 0))
308 return -EIO;
309 return status;
310 }
311
312 cond_resched();
313
314 pos += status;
315 written += status;
316 length -= status;
317
318 balance_dirty_pages_ratelimited(inode->i_mapping);
319 } while (length);
320
321 return written;
322}
323
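/*
 * Re-dirty the page cache over the given range: each page is read in if
 * necessary and then run through iomap_write_begin()/iomap_write_end()
 * unchanged, which marks it dirty against the current block mapping.
 */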
324int
325iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
8ff6daa1 326 const struct iomap_ops *ops)
327{
328 loff_t ret;
329
330 while (len) {
331 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
332 iomap_dirty_actor);
333 if (ret <= 0)
334 return ret;
335 pos += ret;
336 len -= ret;
337 }
338
339 return 0;
340}
341EXPORT_SYMBOL_GPL(iomap_file_dirty);
342
343static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
344 unsigned bytes, struct iomap *iomap)
345{
346 struct page *page;
347 int status;
348
349 status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
350 iomap);
351 if (status)
352 return status;
353
354 zero_user(page, offset, bytes);
355 mark_page_accessed(page);
356
357 return iomap_write_end(inode, pos, bytes, bytes, page);
358}
359
360static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
361 struct iomap *iomap)
362{
363 return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
364 iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
365}
366
367static loff_t
368iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
369 void *data, struct iomap *iomap)
370{
371 bool *did_zero = data;
372 loff_t written = 0;
373 int status;
374
375 /* already zeroed? we're done. */
376 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
377 return count;
378
379 do {
380 unsigned offset, bytes;
381
382 offset = pos & (PAGE_SIZE - 1); /* Within page */
e28ae8e4 383 bytes = min_t(loff_t, PAGE_SIZE - offset, count);
ae259a9c 384
385 if (IS_DAX(inode))
386 status = iomap_dax_zero(pos, offset, bytes, iomap);
387 else
388 status = iomap_zero(inode, pos, offset, bytes, iomap);
389 if (status < 0)
390 return status;
391
392 pos += bytes;
393 count -= bytes;
394 written += bytes;
395 if (did_zero)
396 *did_zero = true;
397 } while (count > 0);
398
399 return written;
400}
401
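/*
 * Zero the byte range @pos/@len.  Holes and unwritten extents are already
 * zero and are skipped; mapped extents are zeroed either through DAX or
 * through the page cache.  *@did_zero is set if any bytes were actually
 * zeroed.
 */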
402int
403iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
8ff6daa1 404 const struct iomap_ops *ops)
405{
406 loff_t ret;
407
408 while (len > 0) {
409 ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
410 ops, did_zero, iomap_zero_range_actor);
411 if (ret <= 0)
412 return ret;
413
414 pos += ret;
415 len -= ret;
416 }
417
418 return 0;
419}
420EXPORT_SYMBOL_GPL(iomap_zero_range);
421
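/*
 * Zero from @pos to the end of the block containing it, typically as part
 * of a truncate.  Nothing to do if @pos is already block aligned.
 */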
422int
423iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
8ff6daa1 424 const struct iomap_ops *ops)
ae259a9c 425{
426 unsigned int blocksize = i_blocksize(inode);
427 unsigned int off = pos & (blocksize - 1);
428
429 /* Block boundary? Nothing to do */
430 if (!off)
431 return 0;
432 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
433}
434EXPORT_SYMBOL_GPL(iomap_truncate_page);
435
436static loff_t
437iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
438 void *data, struct iomap *iomap)
439{
440 struct page *page = data;
441 int ret;
442
c663e29f 443 ret = __block_write_begin_int(page, pos, length, NULL, iomap);
444 if (ret)
445 return ret;
446
447 block_commit_write(page, 0, length);
448 return length;
449}
450
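/*
 * ->page_mkwrite handler: lock the faulted page, check that it is still
 * part of the file and within EOF, allocate or map the blocks backing it,
 * and mark it dirty.  On success the page is returned still locked
 * (VM_FAULT_LOCKED) after waiting for it to become stable.
 */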
11bac800 451int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
452{
453 struct page *page = vmf->page;
11bac800 454 struct inode *inode = file_inode(vmf->vma->vm_file);
455 unsigned long length;
456 loff_t offset, size;
457 ssize_t ret;
458
459 lock_page(page);
460 size = i_size_read(inode);
461 if ((page->mapping != inode->i_mapping) ||
462 (page_offset(page) > size)) {
463 /* We overload EFAULT to mean page got truncated */
464 ret = -EFAULT;
465 goto out_unlock;
466 }
467
468 /* page is wholly or partially inside EOF */
469 if (((page->index + 1) << PAGE_SHIFT) > size)
470 length = size & ~PAGE_MASK;
471 else
472 length = PAGE_SIZE;
473
474 offset = page_offset(page);
475 while (length > 0) {
476 ret = iomap_apply(inode, offset, length,
477 IOMAP_WRITE | IOMAP_FAULT, ops, page,
478 iomap_page_mkwrite_actor);
479 if (unlikely(ret <= 0))
480 goto out_unlock;
481 offset += ret;
482 length -= ret;
483 }
484
485 set_page_dirty(page);
486 wait_for_stable_page(page);
e7647fb4 487 return VM_FAULT_LOCKED;
488out_unlock:
489 unlock_page(page);
e7647fb4 490 return block_page_mkwrite_return(ret);
491}
492EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
493
494struct fiemap_ctx {
495 struct fiemap_extent_info *fi;
496 struct iomap prev;
497};
498
499static int iomap_to_fiemap(struct fiemap_extent_info *fi,
500 struct iomap *iomap, u32 flags)
501{
502 switch (iomap->type) {
503 case IOMAP_HOLE:
504 /* skip holes */
505 return 0;
506 case IOMAP_DELALLOC:
507 flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
508 break;
509 case IOMAP_MAPPED:
510 break;
511 case IOMAP_UNWRITTEN:
512 flags |= FIEMAP_EXTENT_UNWRITTEN;
513 break;
514 case IOMAP_INLINE:
515 flags |= FIEMAP_EXTENT_DATA_INLINE;
516 break;
517 }
518
519 if (iomap->flags & IOMAP_F_MERGED)
520 flags |= FIEMAP_EXTENT_MERGED;
521 if (iomap->flags & IOMAP_F_SHARED)
522 flags |= FIEMAP_EXTENT_SHARED;
17de0a9f 523
8be9f564 524 return fiemap_fill_next_extent(fi, iomap->offset,
19fe5f64 525 iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
17de0a9f 526 iomap->length, flags);
527}
528
529static loff_t
530iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
531 struct iomap *iomap)
532{
533 struct fiemap_ctx *ctx = data;
534 loff_t ret = length;
535
536 if (iomap->type == IOMAP_HOLE)
537 return length;
538
539 ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
540 ctx->prev = *iomap;
541 switch (ret) {
542 case 0: /* success */
543 return length;
544 case 1: /* extent array full */
545 return 0;
546 default:
547 return ret;
548 }
549}
550
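/*
 * Implement the FIEMAP ioctl on top of ->iomap_begin: optionally flush
 * dirty data first, then walk the mappings and report every non-hole
 * extent through fiemap_fill_next_extent(), marking the final one with
 * FIEMAP_EXTENT_LAST.
 */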
551int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
8ff6daa1 552 loff_t start, loff_t len, const struct iomap_ops *ops)
553{
554 struct fiemap_ctx ctx;
555 loff_t ret;
556
557 memset(&ctx, 0, sizeof(ctx));
558 ctx.fi = fi;
559 ctx.prev.type = IOMAP_HOLE;
560
561 ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
562 if (ret)
563 return ret;
564
565 if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
566 ret = filemap_write_and_wait(inode->i_mapping);
567 if (ret)
568 return ret;
569 }
570
571 while (len > 0) {
d33fd776 572 ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
8be9f564 573 iomap_fiemap_actor);
574 /* inode with no (attribute) mapping will give ENOENT */
575 if (ret == -ENOENT)
576 break;
577 if (ret < 0)
578 return ret;
579 if (ret == 0)
580 break;
581
582 start += ret;
583 len -= ret;
584 }
585
586 if (ctx.prev.type != IOMAP_HOLE) {
587 ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
588 if (ret < 0)
589 return ret;
590 }
591
592 return 0;
593}
594EXPORT_SYMBOL_GPL(iomap_fiemap);
ff6a9292 595
596/*
597 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
598 *
599 * Returns the offset within the file on success, and -ENOENT otherwise.
600 */
601static loff_t
602page_seek_hole_data(struct page *page, loff_t lastoff, int whence)
603{
604 loff_t offset = page_offset(page);
605 struct buffer_head *bh, *head;
606 bool seek_data = whence == SEEK_DATA;
607
608 if (lastoff < offset)
609 lastoff = offset;
610
611 bh = head = page_buffers(page);
612 do {
613 offset += bh->b_size;
614 if (lastoff >= offset)
615 continue;
616
617 /*
618 * Unwritten extents that have data in the page cache covering
619 * them can be identified by the BH_Unwritten state flag.
620 * Pages with multiple buffers might have a mix of holes, data
621 * and unwritten extents - any buffer with valid data in it
622 * should have BH_Uptodate flag set on it.
623 */
624
625 if ((buffer_unwritten(bh) || buffer_uptodate(bh)) == seek_data)
626 return lastoff;
627
628 lastoff = offset;
629 } while ((bh = bh->b_this_page) != head);
630 return -ENOENT;
631}
632
633/*
634 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
635 *
636 * Within unwritten extents, the page cache determines which parts are holes
637 * and which are data: unwritten and uptodate buffer heads count as data;
638 * everything else counts as a hole.
639 *
 640 * Returns the resulting offset on success, and -ENOENT otherwise.
641 */
642static loff_t
643page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
644 int whence)
645{
646 pgoff_t index = offset >> PAGE_SHIFT;
647 pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
648 loff_t lastoff = offset;
649 struct pagevec pvec;
650
651 if (length <= 0)
652 return -ENOENT;
653
654 pagevec_init(&pvec);
655
656 do {
657 unsigned nr_pages, i;
658
659 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
660 end - 1);
661 if (nr_pages == 0)
662 break;
663
664 for (i = 0; i < nr_pages; i++) {
665 struct page *page = pvec.pages[i];
666
667 /*
668 * At this point, the page may be truncated or
669 * invalidated (changing page->mapping to NULL), or
670 * even swizzled back from swapper_space to tmpfs file
671 * mapping. However, page->index will not change
672 * because we have a reference on the page.
673 *
674 * If current page offset is beyond where we've ended,
675 * we've found a hole.
676 */
677 if (whence == SEEK_HOLE &&
678 lastoff < page_offset(page))
679 goto check_range;
680
681 lock_page(page);
682 if (likely(page->mapping == inode->i_mapping) &&
683 page_has_buffers(page)) {
684 lastoff = page_seek_hole_data(page, lastoff, whence);
685 if (lastoff >= 0) {
686 unlock_page(page);
687 goto check_range;
688 }
689 }
690 unlock_page(page);
691 lastoff = page_offset(page) + PAGE_SIZE;
692 }
693 pagevec_release(&pvec);
694 } while (index < end);
695
 696 /* If no page was found at lastoff and we are not done, we found a hole. */
697 if (whence != SEEK_HOLE)
698 goto not_found;
699
700check_range:
701 if (lastoff < offset + length)
702 goto out;
703not_found:
704 lastoff = -ENOENT;
705out:
706 pagevec_release(&pvec);
707 return lastoff;
708}
709
710
711static loff_t
712iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
713 void *data, struct iomap *iomap)
714{
715 switch (iomap->type) {
716 case IOMAP_UNWRITTEN:
717 offset = page_cache_seek_hole_data(inode, offset, length,
718 SEEK_HOLE);
719 if (offset < 0)
720 return length;
721 /* fall through */
722 case IOMAP_HOLE:
723 *(loff_t *)data = offset;
724 return 0;
725 default:
726 return length;
727 }
728}
729
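/*
 * SEEK_HOLE on top of ->iomap_begin: walk the mappings from @offset and
 * return the position of the first hole.  Unwritten extents are checked
 * against the page cache, where unwritten or uptodate buffers count as
 * data; if no hole is found before EOF, the end of the file is returned.
 */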
730loff_t
731iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
732{
733 loff_t size = i_size_read(inode);
734 loff_t length = size - offset;
735 loff_t ret;
736
737 /* Nothing to be found before or beyond the end of the file. */
738 if (offset < 0 || offset >= size)
739 return -ENXIO;
740
741 while (length > 0) {
742 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
743 &offset, iomap_seek_hole_actor);
744 if (ret < 0)
745 return ret;
746 if (ret == 0)
747 break;
748
749 offset += ret;
750 length -= ret;
751 }
752
753 return offset;
754}
755EXPORT_SYMBOL_GPL(iomap_seek_hole);
756
757static loff_t
758iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
759 void *data, struct iomap *iomap)
760{
761 switch (iomap->type) {
762 case IOMAP_HOLE:
763 return length;
764 case IOMAP_UNWRITTEN:
765 offset = page_cache_seek_hole_data(inode, offset, length,
766 SEEK_DATA);
767 if (offset < 0)
768 return length;
769 /*FALLTHRU*/
770 default:
771 *(loff_t *)data = offset;
772 return 0;
773 }
774}
775
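/*
 * SEEK_DATA on top of ->iomap_begin: walk the mappings from @offset and
 * return the position of the first data.  Unwritten extents are checked
 * against the page cache; if only holes remain up to EOF, -ENXIO is
 * returned.
 */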
776loff_t
777iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
778{
779 loff_t size = i_size_read(inode);
780 loff_t length = size - offset;
781 loff_t ret;
782
783 /* Nothing to be found before or beyond the end of the file. */
784 if (offset < 0 || offset >= size)
785 return -ENXIO;
786
787 while (length > 0) {
788 ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
789 &offset, iomap_seek_data_actor);
790 if (ret < 0)
791 return ret;
792 if (ret == 0)
793 break;
794
795 offset += ret;
796 length -= ret;
797 }
798
799 if (length <= 0)
800 return -ENXIO;
801 return offset;
802}
803EXPORT_SYMBOL_GPL(iomap_seek_data);
804
805/*
806 * Private flags for iomap_dio, must not overlap with the public ones in
807 * iomap.h:
808 */
3460cac1 809#define IOMAP_DIO_WRITE_FUA (1 << 28)
4f8ff44b 810#define IOMAP_DIO_NEED_SYNC (1 << 29)
811#define IOMAP_DIO_WRITE (1 << 30)
812#define IOMAP_DIO_DIRTY (1 << 31)
813
814struct iomap_dio {
815 struct kiocb *iocb;
816 iomap_dio_end_io_t *end_io;
817 loff_t i_size;
818 loff_t size;
819 atomic_t ref;
820 unsigned flags;
821 int error;
822
823 union {
824 /* used during submission and for synchronous completion: */
825 struct {
826 struct iov_iter *iter;
827 struct task_struct *waiter;
828 struct request_queue *last_queue;
829 blk_qc_t cookie;
830 } submit;
831
832 /* used for aio completion: */
833 struct {
834 struct work_struct work;
835 } aio;
836 };
837};
838
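/*
 * Finish a direct I/O request: run the filesystem's ->end_io handler,
 * work out the final byte count, re-invalidate the page cache over a
 * write, issue generic_write_sync() if a sync is still needed, and drop
 * the inode's DIO count before freeing the dio.
 */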
839static ssize_t iomap_dio_complete(struct iomap_dio *dio)
840{
841 struct kiocb *iocb = dio->iocb;
332391a9 842 struct inode *inode = file_inode(iocb->ki_filp);
5e25c269 843 loff_t offset = iocb->ki_pos;
844 ssize_t ret;
845
846 if (dio->end_io) {
847 ret = dio->end_io(iocb,
848 dio->error ? dio->error : dio->size,
849 dio->flags);
850 } else {
851 ret = dio->error;
852 }
853
854 if (likely(!ret)) {
855 ret = dio->size;
856 /* check for short read */
5e25c269 857 if (offset + ret > dio->i_size &&
ff6a9292 858 !(dio->flags & IOMAP_DIO_WRITE))
5e25c269 859 ret = dio->i_size - offset;
860 iocb->ki_pos += ret;
861 }
862
863 /*
864 * Try again to invalidate clean pages which might have been cached by
865 * non-direct readahead, or faulted in by get_user_pages() if the source
866 * of the write was an mmap'ed region of the file we're writing. Either
867 * one is a pretty crazy thing to do, so we don't support it 100%. If
868 * this invalidation fails, tough, the write still worked...
869 *
870 * And this page cache invalidation has to be after dio->end_io(), as
871 * some filesystems convert unwritten extents to real allocations in
872 * end_io() when necessary, otherwise a racing buffer read would cache
873 * zeros from unwritten extents.
874 */
875 if (!dio->error &&
876 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
877 int err;
878 err = invalidate_inode_pages2_range(inode->i_mapping,
879 offset >> PAGE_SHIFT,
880 (offset + dio->size - 1) >> PAGE_SHIFT);
881 if (err)
882 dio_warn_stale_pagecache(iocb->ki_filp);
883 }
884
885 /*
886 * If this is a DSYNC write, make sure we push it to stable storage now
887 * that we've written data.
888 */
889 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
890 ret = generic_write_sync(iocb, ret);
891
892 inode_dio_end(file_inode(iocb->ki_filp));
893 kfree(dio);
894
895 return ret;
896}
897
898static void iomap_dio_complete_work(struct work_struct *work)
899{
900 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
901 struct kiocb *iocb = dio->iocb;
ff6a9292 902
4f8ff44b 903 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
904}
905
906/*
907 * Set an error in the dio if none is set yet. We have to use cmpxchg
908 * as the submission context and the completion context(s) can race to
909 * update the error.
910 */
911static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
912{
913 cmpxchg(&dio->error, 0, ret);
914}
915
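/*
 * Per-bio completion handler.  Any error is recorded in the dio; when the
 * last reference goes away, a synchronous caller is woken, an AIO write is
 * punted to the superblock's dio completion workqueue, and an AIO read is
 * completed directly.  The bio's pages are re-dirtied or released as
 * appropriate.
 */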
916static void iomap_dio_bio_end_io(struct bio *bio)
917{
918 struct iomap_dio *dio = bio->bi_private;
919 bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
920
921 if (bio->bi_status)
922 iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
923
924 if (atomic_dec_and_test(&dio->ref)) {
925 if (is_sync_kiocb(dio->iocb)) {
926 struct task_struct *waiter = dio->submit.waiter;
927
928 WRITE_ONCE(dio->submit.waiter, NULL);
929 wake_up_process(waiter);
930 } else if (dio->flags & IOMAP_DIO_WRITE) {
931 struct inode *inode = file_inode(dio->iocb->ki_filp);
932
933 INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
934 queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
935 } else {
936 iomap_dio_complete_work(&dio->aio.work);
937 }
938 }
939
940 if (should_dirty) {
941 bio_check_pages_dirty(bio);
942 } else {
943 struct bio_vec *bvec;
944 int i;
945
946 bio_for_each_segment_all(bvec, bio, i)
947 put_page(bvec->bv_page);
948 bio_put(bio);
949 }
950}
951
952static blk_qc_t
953iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
954 unsigned len)
955{
956 struct page *page = ZERO_PAGE(0);
957 struct bio *bio;
958
959 bio = bio_alloc(GFP_KERNEL, 1);
74d46992 960 bio_set_dev(bio, iomap->bdev);
57fc505d 961 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
962 bio->bi_private = dio;
963 bio->bi_end_io = iomap_dio_bio_end_io;
964
965 get_page(page);
6533b4e4 966 __bio_add_page(bio, page, len, 0);
5cc60aee 967 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
968
969 atomic_inc(&dio->ref);
970 return submit_bio(bio);
971}
972
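/*
 * Map one extent's worth of the direct I/O request onto bios and submit
 * them.  Reads from holes or unwritten extents are satisfied by zeroing
 * the iter instead of doing I/O; writes to newly allocated blocks have the
 * unaligned head and tail of the range zeroed out, and pure data writes
 * with datasync semantics may be issued with REQ_FUA to avoid a cache
 * flush at completion time.
 */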
973static loff_t
974iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
975 void *data, struct iomap *iomap)
976{
977 struct iomap_dio *dio = data;
978 unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
979 unsigned int fs_block_size = i_blocksize(inode), pad;
980 unsigned int align = iov_iter_alignment(dio->submit.iter);
981 struct iov_iter iter;
982 struct bio *bio;
983 bool need_zeroout = false;
3460cac1 984 bool use_fua = false;
ff6a9292 985 int nr_pages, ret;
cfe057f7 986 size_t copied = 0;
987
988 if ((pos | length | align) & ((1 << blkbits) - 1))
989 return -EINVAL;
990
991 switch (iomap->type) {
992 case IOMAP_HOLE:
993 if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
994 return -EIO;
995 /*FALLTHRU*/
996 case IOMAP_UNWRITTEN:
997 if (!(dio->flags & IOMAP_DIO_WRITE)) {
cfe057f7 998 length = iov_iter_zero(length, dio->submit.iter);
999 dio->size += length;
1000 return length;
1001 }
1002 dio->flags |= IOMAP_DIO_UNWRITTEN;
1003 need_zeroout = true;
1004 break;
1005 case IOMAP_MAPPED:
1006 if (iomap->flags & IOMAP_F_SHARED)
1007 dio->flags |= IOMAP_DIO_COW;
3460cac1 1008 if (iomap->flags & IOMAP_F_NEW) {
ff6a9292 1009 need_zeroout = true;
1010 } else {
1011 /*
1012 * Use a FUA write if we need datasync semantics, this
1013 * is a pure data IO that doesn't require any metadata
1014 * updates and the underlying device supports FUA. This
1015 * allows us to avoid cache flushes on IO completion.
1016 */
1017 if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
1018 (dio->flags & IOMAP_DIO_WRITE_FUA) &&
1019 blk_queue_fua(bdev_get_queue(iomap->bdev)))
1020 use_fua = true;
1021 }
1022 break;
1023 default:
1024 WARN_ON_ONCE(1);
1025 return -EIO;
1026 }
1027
1028 /*
1029 * Operate on a partial iter trimmed to the extent we were called for.
1030 * We'll update the iter in the dio once we're done with this extent.
1031 */
1032 iter = *dio->submit.iter;
1033 iov_iter_truncate(&iter, length);
1034
1035 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1036 if (nr_pages <= 0)
1037 return nr_pages;
1038
1039 if (need_zeroout) {
1040 /* zero out from the start of the block to the write offset */
1041 pad = pos & (fs_block_size - 1);
1042 if (pad)
1043 iomap_dio_zero(dio, iomap, pos - pad, pad);
1044 }
1045
1046 do {
1047 size_t n;
1048 if (dio->error) {
1049 iov_iter_revert(dio->submit.iter, copied);
ff6a9292 1050 return 0;
cfe057f7 1051 }
1052
1053 bio = bio_alloc(GFP_KERNEL, nr_pages);
74d46992 1054 bio_set_dev(bio, iomap->bdev);
57fc505d 1055 bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
45d06cf7 1056 bio->bi_write_hint = dio->iocb->ki_hint;
1057 bio->bi_private = dio;
1058 bio->bi_end_io = iomap_dio_bio_end_io;
1059
1060 ret = bio_iov_iter_get_pages(bio, &iter);
1061 if (unlikely(ret)) {
1062 bio_put(bio);
cfe057f7 1063 return copied ? copied : ret;
1064 }
1065
cfe057f7 1066 n = bio->bi_iter.bi_size;
ff6a9292 1067 if (dio->flags & IOMAP_DIO_WRITE) {
1068 bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
1069 if (use_fua)
1070 bio->bi_opf |= REQ_FUA;
1071 else
1072 dio->flags &= ~IOMAP_DIO_WRITE_FUA;
cfe057f7 1073 task_io_account_write(n);
ff6a9292 1074 } else {
3460cac1 1075 bio->bi_opf = REQ_OP_READ;
1076 if (dio->flags & IOMAP_DIO_DIRTY)
1077 bio_set_pages_dirty(bio);
1078 }
1079
1080 iov_iter_advance(dio->submit.iter, n);
1081
1082 dio->size += n;
1083 pos += n;
1084 copied += n;
1085
1086 nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
1087
1088 atomic_inc(&dio->ref);
1089
1090 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1091 dio->submit.cookie = submit_bio(bio);
1092 } while (nr_pages);
1093
1094 if (need_zeroout) {
1095 /* zero out from the end of the write to the end of the block */
1096 pad = pos & (fs_block_size - 1);
1097 if (pad)
1098 iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
1099 }
cfe057f7 1100 return copied;
1101}
1102
1103/*
1104 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
1105 * is being issued as AIO or not. This allows us to optimise pure data writes
1106 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
1107 * REQ_FLUSH post write. This is slightly tricky because a single request here
1108 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
1109 * may be pure data writes. In that case, we still need to do a full data sync
1110 * completion.
4f8ff44b 1111 */
ff6a9292 1112ssize_t
1113iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
1114 const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
1115{
1116 struct address_space *mapping = iocb->ki_filp->f_mapping;
1117 struct inode *inode = file_inode(iocb->ki_filp);
1118 size_t count = iov_iter_count(iter);
1119 loff_t pos = iocb->ki_pos, start = pos;
1120 loff_t end = iocb->ki_pos + count - 1, ret = 0;
1121 unsigned int flags = IOMAP_DIRECT;
1122 struct blk_plug plug;
1123 struct iomap_dio *dio;
1124
1125 lockdep_assert_held(&inode->i_rwsem);
1126
1127 if (!count)
1128 return 0;
1129
1130 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1131 if (!dio)
1132 return -ENOMEM;
1133
1134 dio->iocb = iocb;
1135 atomic_set(&dio->ref, 1);
1136 dio->size = 0;
1137 dio->i_size = i_size_read(inode);
1138 dio->end_io = end_io;
1139 dio->error = 0;
1140 dio->flags = 0;
1141
1142 dio->submit.iter = iter;
1143 if (is_sync_kiocb(iocb)) {
1144 dio->submit.waiter = current;
1145 dio->submit.cookie = BLK_QC_T_NONE;
1146 dio->submit.last_queue = NULL;
1147 }
1148
1149 if (iov_iter_rw(iter) == READ) {
1150 if (pos >= dio->i_size)
1151 goto out_free_dio;
1152
1153 if (iter->type == ITER_IOVEC)
1154 dio->flags |= IOMAP_DIO_DIRTY;
1155 } else {
3460cac1 1156 flags |= IOMAP_WRITE;
ff6a9292 1157 dio->flags |= IOMAP_DIO_WRITE;
1158
1159 /* for data sync or sync, we need sync completion processing */
1160 if (iocb->ki_flags & IOCB_DSYNC)
1161 dio->flags |= IOMAP_DIO_NEED_SYNC;
1162
1163 /*
1164 * For datasync only writes, we optimistically try using FUA for
1165 * this IO. Any non-FUA write that occurs will clear this flag,
1166 * hence we know before completion whether a cache flush is
1167 * necessary.
1168 */
1169 if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
1170 dio->flags |= IOMAP_DIO_WRITE_FUA;
1171 }
1172
1173 if (iocb->ki_flags & IOCB_NOWAIT) {
1174 if (filemap_range_has_page(mapping, start, end)) {
1175 ret = -EAGAIN;
1176 goto out_free_dio;
1177 }
1178 flags |= IOMAP_NOWAIT;
1179 }
1180
1181 ret = filemap_write_and_wait_range(mapping, start, end);
1182 if (ret)
1183 goto out_free_dio;
ff6a9292 1184
1185 /*
1186 * Try to invalidate cache pages for the range we're direct
1187 * writing. If this invalidation fails, tough, the write will
1188 * still work, but racing two incompatible write paths is a
1189 * pretty crazy thing to do, so we don't support it 100%.
1190 */
1191 ret = invalidate_inode_pages2_range(mapping,
1192 start >> PAGE_SHIFT, end >> PAGE_SHIFT);
1193 if (ret)
1194 dio_warn_stale_pagecache(iocb->ki_filp);
55635ba7 1195 ret = 0;
ff6a9292 1196
1197 if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
1198 !inode->i_sb->s_dio_done_wq) {
1199 ret = sb_init_dio_done_wq(inode->i_sb);
1200 if (ret < 0)
1201 goto out_free_dio;
1202 }
1203
1204 inode_dio_begin(inode);
1205
1206 blk_start_plug(&plug);
1207 do {
1208 ret = iomap_apply(inode, pos, count, flags, ops, dio,
1209 iomap_dio_actor);
1210 if (ret <= 0) {
1211 /* magic error code to fall back to buffered I/O */
1212 if (ret == -ENOTBLK)
1213 ret = 0;
1214 break;
1215 }
1216 pos += ret;
1217
1218 if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
1219 break;
1220 } while ((count = iov_iter_count(iter)) > 0);
1221 blk_finish_plug(&plug);
1222
1223 if (ret < 0)
1224 iomap_dio_set_error(dio, ret);
1225
1226 /*
1227 * If all the writes we issued were FUA, we don't need to flush the
1228 * cache on IO completion. Clear the sync flag for this case.
1229 */
1230 if (dio->flags & IOMAP_DIO_WRITE_FUA)
1231 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
1232
1233 if (!atomic_dec_and_test(&dio->ref)) {
1234 if (!is_sync_kiocb(iocb))
1235 return -EIOCBQUEUED;
1236
1237 for (;;) {
1238 set_current_state(TASK_UNINTERRUPTIBLE);
1239 if (!READ_ONCE(dio->submit.waiter))
1240 break;
1241
1242 if (!(iocb->ki_flags & IOCB_HIPRI) ||
1243 !dio->submit.last_queue ||
ea435e1b 1244 !blk_poll(dio->submit.last_queue,
5cc60aee 1245 dio->submit.cookie))
1246 io_schedule();
1247 }
1248 __set_current_state(TASK_RUNNING);
1249 }
1250
1251 ret = iomap_dio_complete(dio);
1252
c771c14b 1253 return ret;
1254
1255out_free_dio:
1256 kfree(dio);
1257 return ret;
1258}
1259EXPORT_SYMBOL_GPL(iomap_dio_rw);
1260
1261/* Swapfile activation */
1262
1263#ifdef CONFIG_SWAP
1264struct iomap_swapfile_info {
1265 struct iomap iomap; /* accumulated iomap */
1266 struct swap_info_struct *sis;
1267 uint64_t lowest_ppage; /* lowest physical addr seen (pages) */
1268 uint64_t highest_ppage; /* highest physical addr seen (pages) */
1269 unsigned long nr_pages; /* number of pages collected */
1270 int nr_extents; /* extent count */
1271};
1272
1273/*
1274 * Collect physical extents for this swap file. Physical extents reported to
1275 * the swap code must be trimmed to align to a page boundary. The logical
1276 * offset within the file is irrelevant since the swapfile code maps logical
1277 * page numbers of the swap device to the physical page-aligned extents.
1278 */
1279static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
1280{
1281 struct iomap *iomap = &isi->iomap;
1282 unsigned long nr_pages;
1283 uint64_t first_ppage;
1284 uint64_t first_ppage_reported;
1285 uint64_t next_ppage;
1286 int error;
1287
1288 /*
1289 * Round the start up and the end down so that the physical
1290 * extent aligns to a page boundary.
1291 */
1292 first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
1293 next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
1294 PAGE_SHIFT;
1295
1296 /* Skip too-short physical extents. */
1297 if (first_ppage >= next_ppage)
1298 return 0;
1299 nr_pages = next_ppage - first_ppage;
1300
1301 /*
1302 * Calculate how much swap space we're adding; the first page contains
1303 * the swap header and doesn't count. The mm still wants that first
1304 * page fed to add_swap_extent, however.
1305 */
1306 first_ppage_reported = first_ppage;
1307 if (iomap->offset == 0)
1308 first_ppage_reported++;
1309 if (isi->lowest_ppage > first_ppage_reported)
1310 isi->lowest_ppage = first_ppage_reported;
1311 if (isi->highest_ppage < (next_ppage - 1))
1312 isi->highest_ppage = next_ppage - 1;
1313
1314 /* Add extent, set up for the next call. */
1315 error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
1316 if (error < 0)
1317 return error;
1318 isi->nr_extents += error;
1319 isi->nr_pages += nr_pages;
1320 return 0;
1321}
1322
1323/*
1324 * Accumulate iomaps for this swap file. We have to accumulate iomaps because
1325 * swap only cares about contiguous page-aligned physical extents and makes no
1326 * distinction between written and unwritten extents.
1327 */
1328static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
1329 loff_t count, void *data, struct iomap *iomap)
1330{
1331 struct iomap_swapfile_info *isi = data;
1332 int error;
1333
1334 switch (iomap->type) {
1335 case IOMAP_MAPPED:
1336 case IOMAP_UNWRITTEN:
1337 /* Only real or unwritten extents. */
1338 break;
1339 case IOMAP_INLINE:
1340 /* No inline data. */
1341 pr_err("swapon: file is inline\n");
1342 return -EINVAL;
19319b53 1343 default:
1344 pr_err("swapon: file has unallocated extents\n");
1345 return -EINVAL;
1346 }
67482129 1347
1348 /* No uncommitted metadata or shared blocks. */
1349 if (iomap->flags & IOMAP_F_DIRTY) {
1350 pr_err("swapon: file is not committed\n");
1351 return -EINVAL;
1352 }
1353 if (iomap->flags & IOMAP_F_SHARED) {
1354 pr_err("swapon: file has shared extents\n");
1355 return -EINVAL;
1356 }
67482129 1357
1358 /* Only one bdev per swap file. */
1359 if (iomap->bdev != isi->sis->bdev) {
1360 pr_err("swapon: file is on multiple devices\n");
1361 return -EINVAL;
1362 }
1363
1364 if (isi->iomap.length == 0) {
1365 /* No accumulated extent, so just store it. */
1366 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
1367 } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
1368 /* Append this to the accumulated extent. */
1369 isi->iomap.length += iomap->length;
1370 } else {
1371 /* Otherwise, add the retained iomap and store this one. */
1372 error = iomap_swapfile_add_extent(isi);
1373 if (error)
1374 return error;
1375 memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
1376 }
67482129 1377 return count;
1378}
1379
1380/*
1381 * Iterate a swap file's iomaps to construct physical extents that can be
1382 * passed to the swapfile subsystem.
1383 */
1384int iomap_swapfile_activate(struct swap_info_struct *sis,
1385 struct file *swap_file, sector_t *pagespan,
1386 const struct iomap_ops *ops)
1387{
1388 struct iomap_swapfile_info isi = {
1389 .sis = sis,
1390 .lowest_ppage = (sector_t)-1ULL,
1391 };
1392 struct address_space *mapping = swap_file->f_mapping;
1393 struct inode *inode = mapping->host;
1394 loff_t pos = 0;
1395 loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
1396 loff_t ret;
1397
1398 ret = filemap_write_and_wait(inode->i_mapping);
1399 if (ret)
1400 return ret;
1401
1402 while (len > 0) {
1403 ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
1404 ops, &isi, iomap_swapfile_activate_actor);
1405 if (ret <= 0)
1406 return ret;
1407
1408 pos += ret;
1409 len -= ret;
1410 }
1411
1412 if (isi.iomap.length) {
1413 ret = iomap_swapfile_add_extent(&isi);
1414 if (ret)
1415 return ret;
1416 }
1417
1418 *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
1419 sis->max = isi.nr_pages;
1420 sis->pages = isi.nr_pages - 1;
1421 sis->highest_bit = isi.nr_pages - 1;
1422 return isi.nr_extents;
1423}
1424EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
1425#endif /* CONFIG_SWAP */
1426
1427static loff_t
1428iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
1429 void *data, struct iomap *iomap)
1430{
1431 sector_t *bno = data, addr;
1432
1433 if (iomap->type == IOMAP_MAPPED) {
1434 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
1435 if (addr > INT_MAX)
1436 WARN(1, "would truncate bmap result\n");
1437 else
1438 *bno = addr;
1439 }
1440 return 0;
1441}
1442
1443/* legacy ->bmap interface. 0 is the error return (!) */
1444sector_t
1445iomap_bmap(struct address_space *mapping, sector_t bno,
1446 const struct iomap_ops *ops)
1447{
1448 struct inode *inode = mapping->host;
1449 loff_t pos = bno >> inode->i_blkbits;
1450 unsigned blocksize = i_blocksize(inode);
1451
1452 if (filemap_write_and_wait(mapping))
1453 return 0;
1454
1455 bno = 0;
1456 iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
1457 return bno;
1458}
1459EXPORT_SYMBOL_GPL(iomap_bmap);