/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		inode_dio_done(ioend->io_inode);
		if (ioend->io_isasync) {
			aio_complete(ioend->io_iocb, ioend->io_error ?
					ioend->io_error : ioend->io_result, 0);
		}
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
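
/*
 * Note that the check above reads i_d.di_size without holding the inode
 * lock, so it can race with a concurrent size update -- hence "fast and
 * loose".  That is harmless here: it only decides whether completion work
 * gets queued, and xfs_setfilesize() retests the size under XFS_ILOCK_EXCL.
 * E.g. with an on-disk size of 4096 bytes, an ioend with io_offset == 4096
 * and io_size == 4096 counts as an append.
 */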

STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);

	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
		      1, _THIS_IP_);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}
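
/*
 * For reference: xfs_new_eof() (a helper in xfs_inode.h at this point in
 * the tree) clamps the end of the I/O to the in-core inode size and
 * returns 0 when the on-disk size needs no update, which is why the
 * !isize case above simply cancels the transaction.
 */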

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

		if (ioend->io_type == XFS_IO_UNWRITTEN)
			queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
		else if (ioend->io_append_trans ||
			 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
			queue_work(mp->m_data_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ioend->io_error = -EIO;
		goto done;
	}
	if (ioend->io_error)
		goto done;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == XFS_IO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
	} else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
		/*
		 * For direct I/O we do not know if we need to allocate blocks
		 * or not, so we can't preallocate an append transaction, as
		 * that results in nested reservations and log space deadlocks.
		 * Hence allocate the transaction here.  While this is
		 * sub-optimal and can block IO completion for some time, we're
		 * stuck with doing it this way until we can pass the ioend to
		 * the direct IO allocation callbacks and avoid nesting that
		 * way.
		 */
		error = xfs_setfilesize_trans_alloc(ioend);
		if (error)
			goto done;
		error = xfs_setfilesize(ioend);
	} else if (ioend->io_append_trans) {
		error = xfs_setfilesize(ioend);
	} else {
		ASSERT(!xfs_ioend_is_append(ioend));
	}

done:
	if (error)
		ioend->io_error = -error;
	xfs_destroy_ioend(ioend);
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O completion
	 * callback from happening before we have started all the I/O; it
	 * keeps the completion routine from running too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_isdirect = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;
	ioend->io_append_trans = NULL;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
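
/*
 * Rough lifecycle of an ioend in the buffered writeback path (a sketch,
 * not authoritative):
 *
 *	ioend = xfs_alloc_ioend(inode, type);	io_remaining == 1
 *	xfs_add_to_ioend(...);			chain buffers on io_buffer_*
 *	xfs_submit_ioend_bio(...);		io_remaining++ per bio
 *	xfs_finish_ioend(ioend);		drop the initial reference
 *	xfs_end_bio(...);			each bio drops its reference
 *
 * Whichever put drops io_remaining to zero runs (or queues) completion.
 */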

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if (offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
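
/*
 * Worked example (assuming 4k blocks, i_blkbits == 12): an imap with
 * br_startoff == 100 and br_blockcount == 8 covers file blocks 100-107,
 * so byte offsets 409600 through 442367 are valid against it and anything
 * outside that range forces a new mapping.
 */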

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;
	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark the
 * buffers as we process them, we can end up with a page that only has some
 * buffers marked async write, and I/O completion on those can occur before we
 * mark the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the ioend chain rather
 * than submit it to IO.  This typically only happens on a filesystem shutdown.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * *result is updated to point at the ioend the buffer was added to.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
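
/*
 * The resulting structure is two singly linked chains: ioends are linked
 * through io_list, and the buffers within each ioend through b_private
 * (head at io_buffer_head, tail at io_buffer_tail).  xfs_submit_ioend()
 * walks both.
 */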

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
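
/*
 * Example of the block number arithmetic above (assuming 4k blocks,
 * i_blkbits == 12, BBSHIFT == 9): for an extent mapped at disk address
 * iomap_bn == 80 basic (512 byte) blocks, a buffer 8192 bytes into the
 * extent gets b_blocknr == (80 >> 3) + (8192 >> 12) == 12, i.e.
 * filesystem block 12 on the device.
 */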

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_check_page_type(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable += (type == XFS_IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable += (type == XFS_IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable += (type == XFS_IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
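	/*
	 * E.g. with 4k pages and 512 byte blocks (len == 512): on the last
	 * page of a file ending 1000 bytes into the page, p_offset is
	 * rounded up from 1000 to 1024 and page_dirty == 2; on any earlier
	 * page p_offset is PAGE_CACHE_SIZE and page_dirty == 8.
	 */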

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec	pvec;
	int		done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Just skip the page if it is fully outside i_size, e.g. due
		 * to a truncate operation that is in progress.
		 */
		if (page->index >= end_index + 1 || offset_into_page == 0) {
			unlock_page(page);
			return 0;
		}

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.  For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction.  It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * we can go on without dropping the lock here.  If we
			 * are allocating a new delalloc block, make sure that
			 * we set the new flag so that we mark the buffer new,
			 * so that we know that it is newly allocated if the
			 * write fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}
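
/*
 * The two wrappers above differ only in the direct flag: xfs_get_blocks()
 * serves the buffered paths (the mpage read helpers, __block_write_begin()
 * and ->bmap below), while xfs_get_blocks_direct() is passed to
 * __blockdev_direct_IO() and additionally stashes the inode in b_private
 * so unwritten extent conversion can be triggered at I/O completion.
 */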

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions.  In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size.  To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not. Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					    offset, nr_segs,
					    xfs_get_blocks_direct,
					    NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset = pos & PAGE_MASK;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
				   block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure.  Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index,
					   flags | AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		if (pos + len > i_size_read(inode))
			truncate_pagecache(inode, pos + len, i_size_read(inode));

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF because they
 * will never be written.  For blocks within EOF, generic_write_end() zeros
 * them so they are safe to leave alone and be written with all the other
 * valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			truncate_pagecache(inode, to, isize);
			xfs_vm_kill_delalloc_range(inode, isize, to);
		}
	}
	return ret;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};
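
/*
 * This operations table is wired up when a VFS inode is initialised
 * (inode->i_mapping->a_ops is pointed at xfs_address_space_operations;
 * at this point in the tree that happens in xfs_setup_inode() in
 * xfs_iops.c).  The VFS and MM then call straight through it, e.g.
 * ->writepage from the writeback flusher threads and ->direct_IO from
 * the generic O_DIRECT file I/O path.
 */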