block: switch bios to blk_status_t
fs/xfs/xfs_aops.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

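/*
 * Walk the buffers attached to @page and report whether any of them are
 * in the delalloc or unwritten state.
 */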
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

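/*
 * Return the block device that backs this inode's data: the realtime
 * device for realtime inodes, the data device for everything else.
 */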
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	unsigned int		end = bvec->bv_offset + bvec->bv_len - 1;
	struct buffer_head	*head, *bh, *next;
	unsigned int		off = 0;
	unsigned int		bsize;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(end < PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	bh = head = page_buffers(bvec->bv_page);

	bsize = bh->b_size;
	do {
		if (off > end)
			break;
		next = bh->b_this_page;
		if (off < bvec->bv_offset)
			goto next_bh;
		bh->b_end_io(bh, !error);
next_bh:
		off += bsize;
	} while ((bh = next) != head);
}

/*
 * We're now finished for good with this ioend structure. Update the page
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*last = ioend->io_bio;
	struct bio		*bio, *next;

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

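/*
 * Allocate the transaction for an on-disk inode size update ahead of time,
 * at I/O submission, and attach it to the ioend. The transaction (and the
 * freeze protection it holds) is handed off to the I/O completion thread.
 */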
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

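/*
 * Allocate a size-update transaction and apply the update immediately.
 */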
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

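/*
 * Complete the deferred on-disk size update for an ioend, reusing the
 * transaction that was allocated at I/O submission time.
 */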
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		error = xfs_iomap_write_unwritten(ip, offset, size);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

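/*
 * Bio completion handler, run when the last bio of an ioend finishes.
 * Unwritten and COW conversions, and anything that needs a size update
 * transaction, are punted to a workqueue; everything else is torn down
 * directly here.
 */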
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

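/*
 * Look up the extent mapping for the block at @offset under the shared
 * ilock, allocating real space for delalloc reservations when this is
 * XFS_IO_DELALLOC writeback.
 */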
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

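/*
 * Return true if the cached mapping covers the block containing @offset.
 */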
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

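/*
 * Transition a mapped, locked buffer into the async-write state ahead of
 * bio submission.
 */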
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly. That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	submit_bio(ioend->io_bio);
	return 0;
}

static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
}

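/*
 * Allocate an ioend for the given type and offset. The ioend is allocated
 * together with its first bio (io_inline_bio) from xfs_ioend_bioset, so a
 * bio is always available for submission without a further allocation.
 */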
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Any ioend we finish off is placed on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

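/*
 * Compute the disk block number for @offset from the extent mapping and
 * store it in the buffer_head.
 */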
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

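/*
 * Check whether writeback at @offset needs to go through the COW fork. If a
 * delayed COW extent covers the offset, allocate real blocks for it here and
 * cache the resulting mapping in the writepage context.
 */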
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected. While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	__uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	__uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    | Straddles  |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and written out repeatedly, which would result in an
		 * infinite loop; the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty
	 * but otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, skip the former and warn if we've left any lingering
	 * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
	 * or unwritten buffers and warn if the page is not dirty. Otherwise
	 * try to release the buffers.
	 */
	xfs_count_page_state(page, &delalloc, &unwritten);

	if (delalloc) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}
	if (unwritten) {
		WARN_ON_ONCE(!PageDirty(page));
		return 0;
	}

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			       &imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
					   imap.br_state == XFS_EXT_UNWRITTEN ?
					   XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 */
	if (xfs_is_reflink_inode(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate  = block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};