/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/* flags for direct write completions */
#define XFS_DIO_FLAG_UNWRITTEN	(1 << 0)
#define XFS_DIO_FLAG_APPEND	(1 << 1)
#define XFS_DIO_FLAG_COW	(1 << 2)
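
/*
 * Illustrative sketch (added commentary, not part of the original code):
 * xfs_map_direct() ORs these flags into the uintptr_t stashed in
 * bh_result->b_private, and xfs_end_io_direct_write() decodes them again to
 * pick the completion work, roughly:
 *
 *	uintptr_t flags = (uintptr_t)private;
 *
 *	if (flags & XFS_DIO_FLAG_UNWRITTEN)
 *		;	// convert unwritten extents to written
 *	if (flags & XFS_DIO_FLAG_APPEND)
 *		;	// update the on-disk file size
 *
 * No flags set means an overwrite wholly within i_size, which needs no
 * transactional completion work at all.
 */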
/* structure owned by writepages passed to individual writepage calls */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}
/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		if (off > end)
			break;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}
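
/*
 * Worked example (added commentary, not part of the original code): on a 4k
 * page carrying eight 512-byte bufferheads, a bvec with bv_offset == 1024 and
 * bv_len == 2048 walks the whole chain but only calls b_end_io() on the four
 * buffers covering bytes 1024-3071; buffers before bv_offset are skipped via
 * the goto, and the walk stops once off passes end.
 */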
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}
/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}
/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
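
/*
 * Note (added commentary): the lockdep juggling above mirrors
 * xfs_setfilesize_trans_alloc(), which allocated the transaction in the
 * submission thread and called __sb_writers_release() and
 * current_restore_flags_nested() before handing it over.  Re-acquiring both
 * annotations here keeps freeze protection and PF_FSTRANS balanced across
 * the thread handoff.
 */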
/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = ioend->io_bio->bi_error;

	/*
	 * Set an error if the mount has shut down and proceed with end I/O
	 * processing so it can perform whatever cleanups are necessary.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		error = -EIO;

	/*
	 * For a CoW extent, we need to move the mapping from the CoW fork
	 * to the data fork.  If instead an error happened, just dump the
	 * new blocks.
	 */
	if (ioend->io_type == XFS_IO_COW) {
		if (error)
			goto done;
		if (ioend->io_bio->bi_error) {
			error = xfs_reflink_cancel_cow_range(ip,
					ioend->io_offset, ioend->io_size);
			goto done;
		}
		error = xfs_reflink_end_cow(ip, ioend->io_offset,
				ioend->io_size);
		if (error)
			goto done;
	}

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 * Detecting and handling completion IO errors is done individually
	 * for each case as different cleanup operations need to be performed
	 * on error.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		if (error)
			goto done;
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize_ioend(ioend, error);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend) ||
		       ioend->io_type == XFS_IO_COW);
	}

done:
	xfs_destroy_ioend(ioend, error);
}
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, bio->bi_error);
}
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
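
/*
 * Worked example (added commentary, not part of the original code): with 4k
 * blocks (i_blkbits == 12), a byte offset of 40960 shifts down to file block
 * 10, so an imap with br_startoff == 8 and br_blockcount == 4 (blocks 8-11)
 * is still valid, while br_blockcount == 2 (blocks 8-9) would force the
 * caller back into xfs_map_blocks() for a fresh mapping.
 */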
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
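
/*
 * Note (added commentary): bio_add_page() returns the number of bytes it
 * actually added, which is zero once the bio is full.  That is why
 * xfs_add_to_ioend() below loops while the return value != bh->b_size,
 * chaining a fresh bio each time the current one cannot take the buffer.
 */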
/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE;
	if (wbc->sync_mode == WB_SYNC_ALL)
		ioend->io_bio->bi_opf |= REQ_SYNC;

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_error = status;
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE;
	if (wbc->sync_mode == WB_SYNC_ALL)
		ioend->io_bio->bi_opf |= REQ_SYNC;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}
/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one. This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}
/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}
/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false, need_alloc = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap, &need_alloc);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (need_alloc) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}
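
/*
 * Decision summary (added commentary, not part of the original code):
 * xfs_map_cow() leaves *new_type alone and returns 0 when the inode has no
 * CoW mapping at @offset, so the caller falls back to the data fork type it
 * already computed.  Only when a CoW mapping exists (allocating real space
 * for it first if it is still delalloc) does it switch the writepage
 * context over to XFS_IO_COW.
 */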
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = 1 << inode->i_blkbits;
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
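
/*
 * Worked example (added commentary, not part of the original code): with
 * PAGE_SIZE == 4096 and i_size == 10000, end_index is 2 and offset_into_page
 * is 1808.  Pages 0 and 1 write back in full; page 2 straddles EOF, so bytes
 * 1808-4095 are zeroed and end_offset is clamped to 10000; page 3 and beyond
 * are redirtied and skipped.
 */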
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We've historically freed buffers on the latter. Instead, quietly
	 * filter out all dirty pages to avoid spurious buffer state warnings.
	 * This can likely be removed once shrink_active_list() is fixed.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
/*
 * When we map a DIO buffer, we may need to pass flags to
 * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
 *
 * Note that for DIO, an IO to the highest supported file block offset (i.e.
 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
 * bit variable. Hence if we see this overflow, we have to assume that the IO is
 * extending the file size.  We won't know for sure until IO completion is run
 * and the actual max write offset is communicated to the IO completion
 * routine.
 */
static void
xfs_map_direct(
	struct inode		*inode,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	bool			is_cow)
{
	uintptr_t		*flags = (uintptr_t *)&bh_result->b_private;
	xfs_off_t		size = bh_result->b_size;

	trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
		ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : is_cow ? XFS_IO_COW :
		XFS_IO_OVERWRITE, imap);

	if (ISUNWRITTEN(imap)) {
		*flags |= XFS_DIO_FLAG_UNWRITTEN;
		set_buffer_defer_completion(bh_result);
	} else if (is_cow) {
		*flags |= XFS_DIO_FLAG_COW;
		set_buffer_defer_completion(bh_result);
	}
	if (offset + size > i_size_read(inode) || offset + size < 0) {
		*flags |= XFS_DIO_FLAG_APPEND;
		set_buffer_defer_completion(bh_result);
	}
}
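
/*
 * Note (added commentary): the "offset + size < 0" test above catches the
 * signed 64-bit overflow described in the comment before this function: an
 * IO ending at the maximum supported file offset can wrap negative, and in
 * that case we must assume the write extends the file and defer the size
 * update to the completion handler via XFS_DIO_FLAG_APPEND.
 */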
/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  1 << inode->i_blkbits);
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}
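
/*
 * Worked example (added commentary, not part of the original code): with 4k
 * blocks, i_size == 10000 and a mapping starting at offset 8192 with 64k
 * available, the EOF branch rounds i_size - offset (1808 bytes) up to one
 * block, so b_size becomes 4096 and the region beyond EOF is left for a
 * second, separately-marked mapping.
 */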
/* Bounce unaligned directio writes to the page cache. */
static int
xfs_bounce_unaligned_dio_write(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_bmbt_irec	irec;
	xfs_fileoff_t		delta;
	bool			shared;
	bool			x;
	int			error;

	irec = *imap;
	if (offset_fsb > irec.br_startoff) {
		delta = offset_fsb - irec.br_startoff;
		irec.br_blockcount -= delta;
		irec.br_startblock += delta;
		irec.br_startoff = offset_fsb;
	}
	error = xfs_reflink_trim_around_shared(ip, &irec, &shared, &x);
	if (error)
		return error;

	/*
	 * We're here because we're trying to do a directio write to a
	 * region that isn't aligned to a filesystem block. If any part
	 * of the extent is shared, fall back to buffered mode to handle
	 * the RMW. This is done by returning -EREMCHG ("remote addr
	 * changed"), which is caught further up the call stack.
	 */
	if (shared) {
		trace_xfs_reflink_bounce_dio_write(ip, imap);
		return -EREMCHG;
	}
	return 0;
}
STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	bool			direct,
	bool			dax_fault)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;
	bool			is_cow = false;
	bool			need_alloc = false;

	BUG_ON(create && !direct);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	if (create && direct && xfs_is_reflink_inode(ip))
		is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap,
					&need_alloc);
	if (!is_cow) {
		error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
					&imap, &nimaps, XFS_BMAPI_ENTIRE);

		/*
		 * Truncate an overwrite extent if there's a pending CoW
		 * reservation before the end of this extent.  This
		 * forces us to come back to get_blocks to take care of
		 * the CoW.
		 */
		if (create && direct && nimaps &&
		    imap.br_startblock != HOLESTARTBLOCK &&
		    imap.br_startblock != DELAYSTARTBLOCK &&
		    !ISUNWRITTEN(&imap))
			xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb,
					&imap);
	}
	ASSERT(!need_alloc);
	if (error)
		goto out_unlock;

	/* for DAX, we convert unwritten extents directly */
	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK) ||
	     (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
		/*
		 * xfs_iomap_write_direct() expects the shared lock. It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);

		error = xfs_iomap_write_direct(ip, offset, size,
					       &imap, nimaps);
		if (error)
			return error;
		new = 1;

		trace_xfs_get_blocks_alloc(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_DELALLOC, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
				ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
						   : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (IS_DAX(inode) && create) {
		ASSERT(!ISUNWRITTEN(&imap));
		/* zeroing is not needed at a higher layer */
		new = 0;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK &&
	    (create || !ISUNWRITTEN(&imap))) {
		if (create && direct && !is_cow) {
			error = xfs_bounce_unaligned_dio_write(ip, offset_fsb,
					&imap);
			if (error)
				return error;
		}

		xfs_map_buffer(inode, bh_result, &imap, offset);
		if (ISUNWRITTEN(&imap))
			set_buffer_unwritten(bh_result);
		/* direct IO needs special help */
		if (create) {
			if (dax_fault)
				ASSERT(!ISUNWRITTEN(&imap));
			else
				xfs_map_direct(inode, bh_result, &imap, offset,
						is_cow);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer gets
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
}
int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
}
int
xfs_get_blocks_dax_fault(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
}
/*
 * Complete a direct I/O write request.
 *
 * xfs_map_direct passes us some flags in the private data to tell us what to
 * do.  If no flags are set, then the write IO is an overwrite wholly within
 * the existing allocated file size and so there is nothing for us to do.
 *
 * Note that in this case the completion can be called in interrupt context,
 * whereas if we have flags set we will always be called in task context
 * (i.e. from a workqueue).
 */
STATIC int
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	uintptr_t		flags = (uintptr_t)private;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * The flags tell us whether we are doing unwritten extent conversions
	 * or an append transaction that updates the on-disk file size. These
	 * cases are the only cases where we should *potentially* be needing
	 * to update the VFS inode size.
	 */
	if (flags == 0) {
		ASSERT(offset + size <= i_size_read(inode));
		return 0;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode))
		i_size_write(inode, offset + size);
	spin_unlock(&ip->i_flags_lock);

	if (flags & XFS_DIO_FLAG_COW)
		error = xfs_reflink_end_cow(ip, offset, size);
	if (flags & XFS_DIO_FLAG_UNWRITTEN) {
		trace_xfs_end_io_direct_write_unwritten(ip, offset, size);

		error = xfs_iomap_write_unwritten(ip, offset, size);
	}
	if (flags & XFS_DIO_FLAG_APPEND) {
		trace_xfs_end_io_direct_write_append(ip, offset, size);

		error = xfs_setfilesize(ip, offset, size);
	}

	return error;
}
STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (xfs_is_reflink_inode(ip)) {
		xfs_iunlock(ip, XFS_IOLOCK_SHARED);
		return 0;
	}
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}
STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}
/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += 1 << inode->i_blkbits;
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};