xfs: kill XBF_DONTBLOCK
[linux-2.6-block.git] / fs / xfs / xfs_trans_buf.c
CommitLineData
1da177e4 1/*
7b718769
NS
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
1da177e4 4 *
7b718769
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
1da177e4
LT
7 * published by the Free Software Foundation.
8 *
7b718769
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
1da177e4 13 *
7b718769
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 17 */
1da177e4 18#include "xfs.h"
a844f451 19#include "xfs_fs.h"
1da177e4 20#include "xfs_types.h"
a844f451 21#include "xfs_bit.h"
1da177e4 22#include "xfs_log.h"
a844f451 23#include "xfs_inum.h"
1da177e4 24#include "xfs_trans.h"
1da177e4
LT
25#include "xfs_sb.h"
26#include "xfs_ag.h"
1da177e4 27#include "xfs_mount.h"
a844f451
NS
28#include "xfs_bmap_btree.h"
29#include "xfs_alloc_btree.h"
30#include "xfs_ialloc_btree.h"
a844f451
NS
31#include "xfs_dinode.h"
32#include "xfs_inode.h"
33#include "xfs_buf_item.h"
1da177e4
LT
34#include "xfs_trans_priv.h"
35#include "xfs_error.h"
36#include "xfs_rw.h"
0b1b213f 37#include "xfs_trace.h"
1da177e4 38
4a5224d7
CH
39/*
40 * Check to see if a buffer matching the given parameters is already
41 * a part of the given transaction.
42 */
43STATIC struct xfs_buf *
44xfs_trans_buf_item_match(
45 struct xfs_trans *tp,
46 struct xfs_buftarg *target,
47 xfs_daddr_t blkno,
48 int len)
49{
e98c414f
CH
50 struct xfs_log_item_desc *lidp;
51 struct xfs_buf_log_item *blip;
1da177e4 52
4a5224d7 53 len = BBTOB(len);
e98c414f
CH
54 list_for_each_entry(lidp, &tp->t_items, lid_trans) {
55 blip = (struct xfs_buf_log_item *)lidp->lid_item;
56 if (blip->bli_item.li_type == XFS_LI_BUF &&
49074c06 57 blip->bli_buf->b_target == target &&
e98c414f 58 XFS_BUF_ADDR(blip->bli_buf) == blkno &&
aa0e8833 59 BBTOB(blip->bli_buf->b_length) == len)
e98c414f 60 return blip->bli_buf;
4a5224d7
CH
61 }
62
63 return NULL;
64}
1da177e4 65
d7e84f41
CH
66/*
67 * Add the locked buffer to the transaction.
68 *
69 * The buffer must be locked, and it cannot be associated with any
70 * transaction.
71 *
72 * If the buffer does not yet have a buf log item associated with it,
73 * then allocate one for it. Then add the buf item to the transaction.
74 */
75STATIC void
76_xfs_trans_bjoin(
77 struct xfs_trans *tp,
78 struct xfs_buf *bp,
79 int reset_recur)
80{
81 struct xfs_buf_log_item *bip;
82
bf9d9013 83 ASSERT(bp->b_transp == NULL);
d7e84f41
CH
84
85 /*
86 * The xfs_buf_log_item pointer is stored in b_fsprivate. If
87 * it doesn't have one yet, then allocate one and initialize it.
88 * The checks to see if one is there are in xfs_buf_item_init().
89 */
90 xfs_buf_item_init(bp, tp->t_mountp);
adadbeef 91 bip = bp->b_fspriv;
d7e84f41 92 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
c1155410 93 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
d7e84f41
CH
94 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
95 if (reset_recur)
96 bip->bli_recur = 0;
97
98 /*
99 * Take a reference for this transaction on the buf item.
100 */
101 atomic_inc(&bip->bli_refcount);
102
103 /*
104 * Get a log_item_desc to point at the new item.
105 */
e98c414f 106 xfs_trans_add_item(tp, &bip->bli_item);
d7e84f41
CH
107
108 /*
109 * Initialize b_fsprivate2 so we can find it with incore_match()
110 * in xfs_trans_get_buf() and friends above.
111 */
bf9d9013 112 bp->b_transp = tp;
d7e84f41
CH
113
114}
115
116void
117xfs_trans_bjoin(
118 struct xfs_trans *tp,
119 struct xfs_buf *bp)
120{
121 _xfs_trans_bjoin(tp, bp, 0);
122 trace_xfs_trans_bjoin(bp->b_fspriv);
123}
1da177e4
LT
124
125/*
126 * Get and lock the buffer for the caller if it is not already
127 * locked within the given transaction. If it is already locked
128 * within the transaction, just increment its lock recursion count
129 * and return a pointer to it.
130 *
1da177e4
LT
131 * If the transaction pointer is NULL, make this just a normal
132 * get_buf() call.
133 */
134xfs_buf_t *
135xfs_trans_get_buf(xfs_trans_t *tp,
136 xfs_buftarg_t *target_dev,
137 xfs_daddr_t blkno,
138 int len,
139 uint flags)
140{
141 xfs_buf_t *bp;
142 xfs_buf_log_item_t *bip;
143
144 if (flags == 0)
a8acad70 145 flags = XBF_MAPPED;
1da177e4
LT
146
147 /*
148 * Default to a normal get_buf() call if the tp is NULL.
149 */
6ad112bf 150 if (tp == NULL)
aa5c158e 151 return xfs_buf_get(target_dev, blkno, len, flags);
1da177e4
LT
152
153 /*
154 * If we find the buffer in the cache with this transaction
155 * pointer in its b_fsprivate2 field, then we know we already
156 * have it locked. In this case we just increment the lock
157 * recursion count and return the buffer to the caller.
158 */
4a5224d7 159 bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
1da177e4 160 if (bp != NULL) {
0c842ad4 161 ASSERT(xfs_buf_islocked(bp));
c867cb61
CH
162 if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
163 xfs_buf_stale(bp);
c867cb61
CH
164 XFS_BUF_DONE(bp);
165 }
0b1b213f 166
bf9d9013 167 ASSERT(bp->b_transp == tp);
adadbeef 168 bip = bp->b_fspriv;
1da177e4
LT
169 ASSERT(bip != NULL);
170 ASSERT(atomic_read(&bip->bli_refcount) > 0);
171 bip->bli_recur++;
0b1b213f 172 trace_xfs_trans_get_buf_recur(bip);
1da177e4
LT
173 return (bp);
174 }
175
aa5c158e 176 bp = xfs_buf_get(target_dev, blkno, len, flags);
1da177e4
LT
177 if (bp == NULL) {
178 return NULL;
179 }
180
5a52c2a5 181 ASSERT(!bp->b_error);
1da177e4 182
d7e84f41
CH
183 _xfs_trans_bjoin(tp, bp, 1);
184 trace_xfs_trans_get_buf(bp->b_fspriv);
1da177e4
LT
185 return (bp);
186}
187
188/*
189 * Get and lock the superblock buffer of this file system for the
190 * given transaction.
191 *
192 * We don't need to use incore_match() here, because the superblock
193 * buffer is a private buffer which we keep a pointer to in the
194 * mount structure.
195 */
196xfs_buf_t *
197xfs_trans_getsb(xfs_trans_t *tp,
198 struct xfs_mount *mp,
199 int flags)
200{
201 xfs_buf_t *bp;
202 xfs_buf_log_item_t *bip;
203
204 /*
205 * Default to just trying to lock the superblock buffer
206 * if tp is NULL.
207 */
208 if (tp == NULL) {
209 return (xfs_getsb(mp, flags));
210 }
211
212 /*
213 * If the superblock buffer already has this transaction
214 * pointer in its b_fsprivate2 field, then we know we already
215 * have it locked. In this case we just increment the lock
216 * recursion count and return the buffer to the caller.
217 */
218 bp = mp->m_sb_bp;
bf9d9013 219 if (bp->b_transp == tp) {
adadbeef 220 bip = bp->b_fspriv;
1da177e4
LT
221 ASSERT(bip != NULL);
222 ASSERT(atomic_read(&bip->bli_refcount) > 0);
223 bip->bli_recur++;
0b1b213f 224 trace_xfs_trans_getsb_recur(bip);
1da177e4
LT
225 return (bp);
226 }
227
228 bp = xfs_getsb(mp, flags);
d7e84f41 229 if (bp == NULL)
1da177e4 230 return NULL;
1da177e4 231
d7e84f41
CH
232 _xfs_trans_bjoin(tp, bp, 1);
233 trace_xfs_trans_getsb(bp->b_fspriv);
1da177e4
LT
234 return (bp);
235}
236
#ifdef DEBUG
/*
 * Debug-only error injection knobs, consulted by xfs_trans_read_buf():
 * when xfs_do_error is set and the target matches xfs_error_target,
 * every xfs_error_mod'th read returns EIO.
 */
xfs_buftarg_t	*xfs_error_target;
int		xfs_do_error;
int		xfs_req_num;
int		xfs_error_mod = 33;
#endif
243
244/*
245 * Get and lock the buffer for the caller if it is not already
246 * locked within the given transaction. If it has not yet been
247 * read in, read it from disk. If it is already locked
248 * within the transaction and already read in, just increment its
249 * lock recursion count and return a pointer to it.
250 *
1da177e4
LT
251 * If the transaction pointer is NULL, make this just a normal
252 * read_buf() call.
253 */
254int
255xfs_trans_read_buf(
256 xfs_mount_t *mp,
257 xfs_trans_t *tp,
258 xfs_buftarg_t *target,
259 xfs_daddr_t blkno,
260 int len,
261 uint flags,
262 xfs_buf_t **bpp)
263{
264 xfs_buf_t *bp;
265 xfs_buf_log_item_t *bip;
266 int error;
267
7ca790a5
DC
268 *bpp = NULL;
269
1da177e4 270 if (flags == 0)
a8acad70 271 flags = XBF_MAPPED;
1da177e4
LT
272
273 /*
274 * Default to a normal get_buf() call if the tp is NULL.
275 */
276 if (tp == NULL) {
aa5c158e 277 bp = xfs_buf_read(target, blkno, len, flags);
1da177e4 278 if (!bp)
0cadda1c 279 return (flags & XBF_TRYLOCK) ?
a3f74ffb 280 EAGAIN : XFS_ERROR(ENOMEM);
1da177e4 281
5a52c2a5
CS
282 if (bp->b_error) {
283 error = bp->b_error;
901796af 284 xfs_buf_ioerror_alert(bp, __func__);
7ca790a5
DC
285 XFS_BUF_UNDONE(bp);
286 xfs_buf_stale(bp);
1da177e4
LT
287 xfs_buf_relse(bp);
288 return error;
289 }
290#ifdef DEBUG
a0f7bfd3 291 if (xfs_do_error) {
1da177e4
LT
292 if (xfs_error_target == target) {
293 if (((xfs_req_num++) % xfs_error_mod) == 0) {
294 xfs_buf_relse(bp);
0b932ccc 295 xfs_debug(mp, "Returning error!");
1da177e4
LT
296 return XFS_ERROR(EIO);
297 }
298 }
299 }
300#endif
301 if (XFS_FORCED_SHUTDOWN(mp))
302 goto shutdown_abort;
303 *bpp = bp;
304 return 0;
305 }
306
307 /*
308 * If we find the buffer in the cache with this transaction
309 * pointer in its b_fsprivate2 field, then we know we already
310 * have it locked. If it is already read in we just increment
311 * the lock recursion count and return the buffer to the caller.
312 * If the buffer is not yet read in, then we read it in, increment
313 * the lock recursion count, and return it to the caller.
314 */
4a5224d7 315 bp = xfs_trans_buf_item_match(tp, target, blkno, len);
1da177e4 316 if (bp != NULL) {
0c842ad4 317 ASSERT(xfs_buf_islocked(bp));
bf9d9013 318 ASSERT(bp->b_transp == tp);
adadbeef 319 ASSERT(bp->b_fspriv != NULL);
5a52c2a5 320 ASSERT(!bp->b_error);
1da177e4 321 if (!(XFS_BUF_ISDONE(bp))) {
0b1b213f 322 trace_xfs_trans_read_buf_io(bp, _RET_IP_);
1da177e4
LT
323 ASSERT(!XFS_BUF_ISASYNC(bp));
324 XFS_BUF_READ(bp);
325 xfsbdstrat(tp->t_mountp, bp);
1a1a3e97 326 error = xfs_buf_iowait(bp);
d64e31a2 327 if (error) {
901796af 328 xfs_buf_ioerror_alert(bp, __func__);
1da177e4
LT
329 xfs_buf_relse(bp);
330 /*
d64e31a2
DC
331 * We can gracefully recover from most read
332 * errors. Ones we can't are those that happen
333 * after the transaction's already dirty.
1da177e4
LT
334 */
335 if (tp->t_flags & XFS_TRANS_DIRTY)
336 xfs_force_shutdown(tp->t_mountp,
7d04a335 337 SHUTDOWN_META_IO_ERROR);
1da177e4
LT
338 return error;
339 }
340 }
341 /*
342 * We never locked this buf ourselves, so we shouldn't
343 * brelse it either. Just get out.
344 */
345 if (XFS_FORCED_SHUTDOWN(mp)) {
0b1b213f 346 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
1da177e4
LT
347 *bpp = NULL;
348 return XFS_ERROR(EIO);
349 }
350
351
adadbeef 352 bip = bp->b_fspriv;
1da177e4
LT
353 bip->bli_recur++;
354
355 ASSERT(atomic_read(&bip->bli_refcount) > 0);
0b1b213f 356 trace_xfs_trans_read_buf_recur(bip);
1da177e4
LT
357 *bpp = bp;
358 return 0;
359 }
360
aa5c158e 361 bp = xfs_buf_read(target, blkno, len, flags);
1da177e4
LT
362 if (bp == NULL) {
363 *bpp = NULL;
7401aafd
DC
364 return (flags & XBF_TRYLOCK) ?
365 0 : XFS_ERROR(ENOMEM);
1da177e4 366 }
5a52c2a5
CS
367 if (bp->b_error) {
368 error = bp->b_error;
c867cb61 369 xfs_buf_stale(bp);
c867cb61 370 XFS_BUF_DONE(bp);
901796af 371 xfs_buf_ioerror_alert(bp, __func__);
1da177e4 372 if (tp->t_flags & XFS_TRANS_DIRTY)
7d04a335 373 xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
1da177e4
LT
374 xfs_buf_relse(bp);
375 return error;
376 }
377#ifdef DEBUG
378 if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
379 if (xfs_error_target == target) {
380 if (((xfs_req_num++) % xfs_error_mod) == 0) {
381 xfs_force_shutdown(tp->t_mountp,
7d04a335 382 SHUTDOWN_META_IO_ERROR);
1da177e4 383 xfs_buf_relse(bp);
0b932ccc 384 xfs_debug(mp, "Returning trans error!");
1da177e4
LT
385 return XFS_ERROR(EIO);
386 }
387 }
388 }
389#endif
390 if (XFS_FORCED_SHUTDOWN(mp))
391 goto shutdown_abort;
392
d7e84f41
CH
393 _xfs_trans_bjoin(tp, bp, 1);
394 trace_xfs_trans_read_buf(bp->b_fspriv);
1da177e4 395
1da177e4
LT
396 *bpp = bp;
397 return 0;
398
399shutdown_abort:
0b1b213f 400 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
1da177e4
LT
401 xfs_buf_relse(bp);
402 *bpp = NULL;
403 return XFS_ERROR(EIO);
404}
405
406
407/*
408 * Release the buffer bp which was previously acquired with one of the
409 * xfs_trans_... buffer allocation routines if the buffer has not
410 * been modified within this transaction. If the buffer is modified
411 * within this transaction, do decrement the recursion count but do
412 * not release the buffer even if the count goes to 0. If the buffer is not
413 * modified within the transaction, decrement the recursion count and
414 * release the buffer if the recursion count goes to 0.
415 *
416 * If the buffer is to be released and it was not modified before
417 * this transaction began, then free the buf_log_item associated with it.
418 *
419 * If the transaction pointer is NULL, make this just a normal
420 * brelse() call.
421 */
422void
423xfs_trans_brelse(xfs_trans_t *tp,
424 xfs_buf_t *bp)
425{
426 xfs_buf_log_item_t *bip;
1da177e4
LT
427
428 /*
429 * Default to a normal brelse() call if the tp is NULL.
430 */
431 if (tp == NULL) {
bf9d9013 432 ASSERT(bp->b_transp == NULL);
1da177e4
LT
433 xfs_buf_relse(bp);
434 return;
435 }
436
bf9d9013 437 ASSERT(bp->b_transp == tp);
adadbeef 438 bip = bp->b_fspriv;
1da177e4
LT
439 ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
440 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
c1155410 441 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
1da177e4
LT
442 ASSERT(atomic_read(&bip->bli_refcount) > 0);
443
0b1b213f
CH
444 trace_xfs_trans_brelse(bip);
445
1da177e4
LT
446 /*
447 * If the release is just for a recursive lock,
448 * then decrement the count and return.
449 */
450 if (bip->bli_recur > 0) {
451 bip->bli_recur--;
1da177e4
LT
452 return;
453 }
454
455 /*
456 * If the buffer is dirty within this transaction, we can't
457 * release it until we commit.
458 */
e98c414f 459 if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
1da177e4 460 return;
1da177e4
LT
461
462 /*
463 * If the buffer has been invalidated, then we can't release
464 * it until the transaction commits to disk unless it is re-dirtied
465 * as part of this transaction. This prevents us from pulling
466 * the item from the AIL before we should.
467 */
0b1b213f 468 if (bip->bli_flags & XFS_BLI_STALE)
1da177e4 469 return;
1da177e4
LT
470
471 ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
1da177e4
LT
472
473 /*
474 * Free up the log item descriptor tracking the released item.
475 */
e98c414f 476 xfs_trans_del_item(&bip->bli_item);
1da177e4
LT
477
478 /*
479 * Clear the hold flag in the buf log item if it is set.
480 * We wouldn't want the next user of the buffer to
481 * get confused.
482 */
483 if (bip->bli_flags & XFS_BLI_HOLD) {
484 bip->bli_flags &= ~XFS_BLI_HOLD;
485 }
486
487 /*
488 * Drop our reference to the buf log item.
489 */
490 atomic_dec(&bip->bli_refcount);
491
492 /*
493 * If the buf item is not tracking data in the log, then
494 * we must free it before releasing the buffer back to the
495 * free pool. Before releasing the buffer to the free pool,
496 * clear the transaction pointer in b_fsprivate2 to dissolve
497 * its relation to this transaction.
498 */
499 if (!xfs_buf_item_dirty(bip)) {
500/***
501 ASSERT(bp->b_pincount == 0);
502***/
503 ASSERT(atomic_read(&bip->bli_refcount) == 0);
504 ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
505 ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
506 xfs_buf_item_relse(bp);
1da177e4
LT
507 }
508
5b03ff1b 509 bp->b_transp = NULL;
1da177e4 510 xfs_buf_relse(bp);
1da177e4
LT
511}
512
1da177e4
LT
513/*
514 * Mark the buffer as not needing to be unlocked when the buf item's
515 * IOP_UNLOCK() routine is called. The buffer must already be locked
516 * and associated with the given transaction.
517 */
518/* ARGSUSED */
519void
520xfs_trans_bhold(xfs_trans_t *tp,
521 xfs_buf_t *bp)
522{
adadbeef 523 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 524
bf9d9013 525 ASSERT(bp->b_transp == tp);
adadbeef 526 ASSERT(bip != NULL);
1da177e4 527 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
c1155410 528 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
1da177e4 529 ASSERT(atomic_read(&bip->bli_refcount) > 0);
adadbeef 530
1da177e4 531 bip->bli_flags |= XFS_BLI_HOLD;
0b1b213f 532 trace_xfs_trans_bhold(bip);
1da177e4
LT
533}
534
efa092f3
TS
535/*
536 * Cancel the previous buffer hold request made on this buffer
537 * for this transaction.
538 */
539void
540xfs_trans_bhold_release(xfs_trans_t *tp,
541 xfs_buf_t *bp)
542{
adadbeef 543 xfs_buf_log_item_t *bip = bp->b_fspriv;
efa092f3 544
bf9d9013 545 ASSERT(bp->b_transp == tp);
adadbeef 546 ASSERT(bip != NULL);
efa092f3 547 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
c1155410 548 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
efa092f3
TS
549 ASSERT(atomic_read(&bip->bli_refcount) > 0);
550 ASSERT(bip->bli_flags & XFS_BLI_HOLD);
0b1b213f 551
adadbeef 552 bip->bli_flags &= ~XFS_BLI_HOLD;
0b1b213f 553 trace_xfs_trans_bhold_release(bip);
efa092f3
TS
554}
555
1da177e4
LT
556/*
557 * This is called to mark bytes first through last inclusive of the given
558 * buffer as needing to be logged when the transaction is committed.
559 * The buffer must already be associated with the given transaction.
560 *
561 * First and last are numbers relative to the beginning of this buffer,
562 * so the first byte in the buffer is numbered 0 regardless of the
563 * value of b_blkno.
564 */
565void
566xfs_trans_log_buf(xfs_trans_t *tp,
567 xfs_buf_t *bp,
568 uint first,
569 uint last)
570{
adadbeef 571 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 572
bf9d9013 573 ASSERT(bp->b_transp == tp);
adadbeef 574 ASSERT(bip != NULL);
aa0e8833 575 ASSERT(first <= last && last < BBTOB(bp->b_length));
cb669ca5
CH
576 ASSERT(bp->b_iodone == NULL ||
577 bp->b_iodone == xfs_buf_iodone_callbacks);
1da177e4
LT
578
579 /*
580 * Mark the buffer as needing to be written out eventually,
581 * and set its iodone function to remove the buffer's buf log
582 * item from the AIL and free it when the buffer is flushed
583 * to disk. See xfs_buf_attach_iodone() for more details
584 * on li_cb and xfs_buf_iodone_callbacks().
585 * If we end up aborting this transaction, we trap this buffer
586 * inside the b_bdstrat callback so that this won't get written to
587 * disk.
588 */
1da177e4
LT
589 XFS_BUF_DONE(bp);
590
1da177e4 591 ASSERT(atomic_read(&bip->bli_refcount) > 0);
cb669ca5 592 bp->b_iodone = xfs_buf_iodone_callbacks;
ca30b2a7 593 bip->bli_item.li_cb = xfs_buf_iodone;
1da177e4 594
0b1b213f
CH
595 trace_xfs_trans_log_buf(bip);
596
1da177e4
LT
597 /*
598 * If we invalidated the buffer within this transaction, then
599 * cancel the invalidation now that we're dirtying the buffer
600 * again. There are no races with the code in xfs_buf_item_unpin(),
601 * because we have a reference to the buffer this entire time.
602 */
603 if (bip->bli_flags & XFS_BLI_STALE) {
1da177e4
LT
604 bip->bli_flags &= ~XFS_BLI_STALE;
605 ASSERT(XFS_BUF_ISSTALE(bp));
606 XFS_BUF_UNSTALE(bp);
c1155410 607 bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
1da177e4
LT
608 }
609
1da177e4 610 tp->t_flags |= XFS_TRANS_DIRTY;
e98c414f 611 bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
1da177e4
LT
612 bip->bli_flags |= XFS_BLI_LOGGED;
613 xfs_buf_item_log(bip, first, last);
1da177e4
LT
614}
615
616
617/*
43ff2122
CH
618 * Invalidate a buffer that is being used within a transaction.
619 *
620 * Typically this is because the blocks in the buffer are being freed, so we
621 * need to prevent it from being written out when we're done. Allowing it
622 * to be written again might overwrite data in the free blocks if they are
623 * reallocated to a file.
1da177e4 624 *
43ff2122
CH
625 * We prevent the buffer from being written out by marking it stale. We can't
626 * get rid of the buf log item at this point because the buffer may still be
627 * pinned by another transaction. If that is the case, then we'll wait until
628 * the buffer is committed to disk for the last time (we can tell by the ref
629 * count) and free it in xfs_buf_item_unpin(). Until that happens we will
630 * keep the buffer locked so that the buffer and buf log item are not reused.
631 *
632 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
633 * the buf item. This will be used at recovery time to determine that copies
634 * of the buffer in the log before this should not be replayed.
635 *
636 * We mark the item descriptor and the transaction dirty so that we'll hold
637 * the buffer until after the commit.
638 *
639 * Since we're invalidating the buffer, we also clear the state about which
640 * parts of the buffer have been logged. We also clear the flag indicating
641 * that this is an inode buffer since the data in the buffer will no longer
642 * be valid.
643 *
644 * We set the stale bit in the buffer as well since we're getting rid of it.
1da177e4
LT
645 */
646void
647xfs_trans_binval(
648 xfs_trans_t *tp,
649 xfs_buf_t *bp)
650{
adadbeef 651 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 652
bf9d9013 653 ASSERT(bp->b_transp == tp);
adadbeef 654 ASSERT(bip != NULL);
1da177e4
LT
655 ASSERT(atomic_read(&bip->bli_refcount) > 0);
656
0b1b213f
CH
657 trace_xfs_trans_binval(bip);
658
1da177e4
LT
659 if (bip->bli_flags & XFS_BLI_STALE) {
660 /*
661 * If the buffer is already invalidated, then
662 * just return.
663 */
1da177e4
LT
664 ASSERT(XFS_BUF_ISSTALE(bp));
665 ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
c1155410
DC
666 ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
667 ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
e98c414f 668 ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
1da177e4 669 ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
1da177e4
LT
670 return;
671 }
672
c867cb61 673 xfs_buf_stale(bp);
43ff2122 674
1da177e4 675 bip->bli_flags |= XFS_BLI_STALE;
ccf7c23f 676 bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
c1155410
DC
677 bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
678 bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
1da177e4
LT
679 memset((char *)(bip->bli_format.blf_data_map), 0,
680 (bip->bli_format.blf_map_size * sizeof(uint)));
e98c414f 681 bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
1da177e4 682 tp->t_flags |= XFS_TRANS_DIRTY;
1da177e4
LT
683}
684
685/*
ccf7c23f
DC
686 * This call is used to indicate that the buffer contains on-disk inodes which
687 * must be handled specially during recovery. They require special handling
688 * because only the di_next_unlinked from the inodes in the buffer should be
689 * recovered. The rest of the data in the buffer is logged via the inodes
690 * themselves.
1da177e4 691 *
ccf7c23f
DC
692 * All we do is set the XFS_BLI_INODE_BUF flag in the items flags so it can be
693 * transferred to the buffer's log format structure so that we'll know what to
694 * do at recovery time.
1da177e4 695 */
1da177e4
LT
696void
697xfs_trans_inode_buf(
698 xfs_trans_t *tp,
699 xfs_buf_t *bp)
700{
adadbeef 701 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 702
bf9d9013 703 ASSERT(bp->b_transp == tp);
adadbeef 704 ASSERT(bip != NULL);
1da177e4
LT
705 ASSERT(atomic_read(&bip->bli_refcount) > 0);
706
ccf7c23f 707 bip->bli_flags |= XFS_BLI_INODE_BUF;
1da177e4
LT
708}
709
710/*
711 * This call is used to indicate that the buffer is going to
712 * be staled and was an inode buffer. This means it gets
713 * special processing during unpin - where any inodes
714 * associated with the buffer should be removed from ail.
715 * There is also special processing during recovery,
716 * any replay of the inodes in the buffer needs to be
717 * prevented as the buffer may have been reused.
718 */
719void
720xfs_trans_stale_inode_buf(
721 xfs_trans_t *tp,
722 xfs_buf_t *bp)
723{
adadbeef 724 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 725
bf9d9013 726 ASSERT(bp->b_transp == tp);
adadbeef 727 ASSERT(bip != NULL);
1da177e4
LT
728 ASSERT(atomic_read(&bip->bli_refcount) > 0);
729
730 bip->bli_flags |= XFS_BLI_STALE_INODE;
ca30b2a7 731 bip->bli_item.li_cb = xfs_buf_iodone;
1da177e4
LT
732}
733
1da177e4
LT
734/*
735 * Mark the buffer as being one which contains newly allocated
736 * inodes. We need to make sure that even if this buffer is
737 * relogged as an 'inode buf' we still recover all of the inode
738 * images in the face of a crash. This works in coordination with
739 * xfs_buf_item_committed() to ensure that the buffer remains in the
740 * AIL at its original location even after it has been relogged.
741 */
742/* ARGSUSED */
743void
744xfs_trans_inode_alloc_buf(
745 xfs_trans_t *tp,
746 xfs_buf_t *bp)
747{
adadbeef 748 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 749
bf9d9013 750 ASSERT(bp->b_transp == tp);
adadbeef 751 ASSERT(bip != NULL);
1da177e4
LT
752 ASSERT(atomic_read(&bip->bli_refcount) > 0);
753
754 bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
755}
756
757
758/*
759 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
760 * dquots. However, unlike in inode buffer recovery, dquot buffers get
761 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
762 * The only thing that makes dquot buffers different from regular
763 * buffers is that we must not replay dquot bufs when recovering
764 * if a _corresponding_ quotaoff has happened. We also have to distinguish
765 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
766 * can be turned off independently.
767 */
768/* ARGSUSED */
769void
770xfs_trans_dquot_buf(
771 xfs_trans_t *tp,
772 xfs_buf_t *bp,
773 uint type)
774{
adadbeef 775 xfs_buf_log_item_t *bip = bp->b_fspriv;
1da177e4 776
bf9d9013 777 ASSERT(bp->b_transp == tp);
adadbeef 778 ASSERT(bip != NULL);
c1155410
DC
779 ASSERT(type == XFS_BLF_UDQUOT_BUF ||
780 type == XFS_BLF_PDQUOT_BUF ||
781 type == XFS_BLF_GDQUOT_BUF);
1da177e4
LT
782 ASSERT(atomic_read(&bip->bli_refcount) > 0);
783
784 bip->bli_format.blf_flags |= type;
785}