/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

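/*
 * Walk the buffers attached to a page and report whether any of them are in
 * the delalloc or unwritten state.
 */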
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

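/*
 * Return the block device backing this inode's data: the realtime device for
 * realtime inodes, the data device otherwise.
 */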
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

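/*
 * As above, but return the DAX device backing this inode's data.
 */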
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is
 * not only more efficient, but also ensures that we only call
 * end_page_writeback at the end of the iteration, and thus avoids the pitfall
 * of having the page and buffers potentially freed after every call to
 * end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

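/*
 * Allocate the transaction for an on-disk file size update ahead of time so
 * that the I/O completion path only has to commit it.  The freeze protection
 * and memalloc state taken here are handed over to the completion thread.
 */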
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

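/*
 * Allocate a transaction and use it to update the on-disk file size.
 */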
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

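/*
 * Update the on-disk file size at ioend completion, reclaiming the transaction
 * state handed over by xfs_setfilesize_trans_alloc(), or cancel the
 * transaction if the I/O failed.
 */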
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

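/*
 * Bio completion handler: punt unwritten extent conversion, COW remapping and
 * file size updates to a workqueue, as they need transaction context; plain
 * overwrites are finished directly.
 */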
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

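/*
 * Look up the block mapping for writeback at @offset.  Delalloc extents that
 * have not been allocated yet are converted to real extents here via
 * xfs_iomap_write_allocate().
 */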
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
								imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

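/*
 * Return true if the cached mapping covers @offset, after trimming the
 * mapping to EOF.
 */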
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

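/*
 * Mark a buffer as under async writeback before it is handed to the bio.
 */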
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * If we create a new ioend, the old one is queued on @iolist so that
 * the caller can submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

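/*
 * Decide whether writeback at @offset has to go to the COW fork, reusing a
 * cached COW mapping where possible and allocating real blocks for any
 * delayed COW extent that is found.
 */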
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected. While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		       | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N  | Beyond |
		 * ^--------------------------------^----------|---------
		 * |				    |    Straddles     |
		 * ---------------------------------^----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

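/*
 * ->writepage entry point: run xfs_do_writepage() with a local writepage
 * context and submit the ioend it leaves cached there.
 */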
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released. Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

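/*
 * Read-only get_blocks callback for the ->bmap, readpage and readpages paths;
 * writeback maps blocks through xfs_map_blocks() instead, and create is
 * rejected outright.
 */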
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			imap.br_state == XFS_EXT_UNWRITTEN ?
				XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};