// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

struct kmem_cache		*xfs_bmap_intent_cache;
/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */
	/*
	 * The maximum number of extents in a file, hence the maximum number of
	 * leaf entries, is controlled by the size of the on-disk extent count,
	 * either a signed 32-bit number for the data fork, or a signed 16-bit
	 * number for the attr fork.
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted with
	 * ATTR2 and then mounted back with ATTR1, keeping the i_forkoff's fixed
	 * but probably at various positions. Therefore, for both ATTR1 and
	 * ATTR2 we have to assume the worst case scenario of a minimum size
	 * available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
	ASSERT(mp->m_bm_maxlevels[whichfork] <= xfs_bmbt_maxlevels_ondisk());
}
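
/*
 * Worked example of the loop above (illustrative figures only, not taken
 * from any particular filesystem geometry): with maxleafents = 2^31 - 1 and
 * minleafrecs = 125, the leaf level needs ~17.2M blocks.  If minnoderecs is
 * also 125, each interior level divides that by 125: 17.2M -> ~137k ->
 * ~1100 -> 9 blocks.  Once maxblocks <= maxrootrecs the root fits in the
 * inode, maxblocks collapses to 1, and the loop exits with level == 5, the
 * familiar maximum bmbt depth for the data fork under these assumptions.
 */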
uint
xfs_bmap_compute_attr_offset(
	struct xfs_mount	*mp)
{
	if (mp->m_sb.sb_inodesize == 256)
		return XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	return XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
}
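
/*
 * Illustrative note (figures are geometry-dependent, not from any specific
 * mkfs configuration): a 256-byte inode has so little literal area that
 * reserving the usual 6 * MINABTPTRS bytes would leave almost nothing for
 * the data fork, so the offset is instead pinned to leave exactly one
 * minimal attr btree root's worth of space at the end of the literal area;
 * larger inodes get the fixed default reservation.
 */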
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
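
/*
 * Note on the two lookups above: xfs_bmbt_lookup_eq() positions the cursor
 * only at a record exactly matching bc_rec.b (stat == 1 on an exact match),
 * while xfs_bmbt_lookup_first() seeds an all-zero key and does a >= lookup,
 * landing on the lowest-offset record in the tree; stat == 0 from it
 * therefore means the tree is empty.
 */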
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
		ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);

	return whichfork != XFS_COW_FORK &&
		ifp->if_format == XFS_DINODE_FMT_BTREE &&
		ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
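
/*
 * Taken together: an extents-format fork is promoted to btree format once
 * if_nextents exceeds XFS_IFORK_MAXEXT() (the number of records that fit in
 * the inode fork), and a btree-format fork is demoted back to extent format
 * once if_nextents drops to that limit or below.  The CoW fork is never
 * converted to btree format.
 */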
/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
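
/*
 * Worked example (illustrative record counts): with m_bmap_dmxr[0] = 250
 * leaf records and m_bmap_dmxr[1] = 250 node records, a 1000-block delayed
 * extent needs ceil(1000/250) = 4 leaf blocks, then ceil(4/250) = 1 node
 * block.  Once a level collapses to a single block, the remaining levels up
 * to XFS_BM_MAXLEVELS() are charged one block each, which is what the
 * early-return arm accounts for.
 */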
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
STATIC uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	if (ip->i_df.if_format == XFS_DINODE_FMT_DEV)
		return roundup(sizeof(xfs_dev_t), 8);
	return M_IGEO(ip->i_mount)->attr_fork_offset;
}

/*
 * Helper routine to reset inode i_forkoff field when switching attribute fork
 * from local to extent format - we reset it where possible to make space
 * available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
	    ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_forkoff)
			ip->i_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < cur->bc_maxlevels; i++) {
		if (!cur->bc_levels[i].bp)
			break;
		if (xfs_buf_daddr(cur->bc_levels[i].bp) == bno)
			return cur->bc_levels[i].bp;
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    xfs_buf_daddr(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such
 * large bmapbt constructs those checks are going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	struct xfs_btree_cur	*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	struct xfs_buf		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i = 0, j;	/* index into the extents list */
	int			level;	/* btree level, for checking */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;
	if (ifp->if_format != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_df.if_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, bno))) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if (bp_release)
			xfs_trans_brelse(NULL, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release)
			xfs_trans_brelse(NULL, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;
error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)	do { } while (0)
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */
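
/*
 * In non-DEBUG builds the two checkers above compile to empty statements,
 * so callers elsewhere in this file invoke them unconditionally with no
 * runtime cost in production kernels.
 */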
/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	struct xfs_buf		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, convert from btree to extent format */
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	if (XFS_IS_CORRUPT(cur->bc_mp, !xfs_btree_check_lptr(cur, cbno, 1)))
		return -EFSCORRUPTED;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_levels[0].bp == cbp)
		cur->bc_levels[0].bp = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_ino.flags = wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	ifp->if_format = XFS_DINODE_FMT_BTREE;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_ino.allocated++;
	ip->i_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, args.fsbno),
			mp->m_bsize, 0, &abp);
	if (error)
		goto out_unreserve_dquot;

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, xfs_buf_daddr(abp),
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == ifp->if_nextents);
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(ifp->if_nextents == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	ifp->if_format = XFS_DINODE_FMT_EXTENTS;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	struct xfs_buf	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	error = xfs_trans_get_buf(tp, args.mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(args.mp, args.fsbno),
			args.mp->m_bsize, 0, &bp);
	if (error)
		goto done;

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	ifp->if_nextents = 1;
	ip->i_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_block	*block = ip->i_df.if_broot;
	struct xfs_btree_cur	*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;

	if (XFS_BMAP_BMDR_SPACE(block) <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		if (XFS_IS_CORRUPT(mp, stat != 1)) {
			error = -EFSCORRUPTED;
			goto error0;
		}
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
	    XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_ino.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * conversion.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/*
 * Set an inode attr fork offset based on the format of the data fork.
 */
static int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	int			default_size = xfs_default_attroffset(ip) >> 3;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_forkoff = default_size;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_forkoff)
			ip->i_forkoff = default_size;
		else if (xfs_has_attr2(ip->i_mount) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}
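
/*
 * Illustrative note on units: i_forkoff is stored in 8-byte units, hence
 * the >> 3 when deriving it from the byte offset returned by
 * xfs_default_attroffset().  For example, a byte offset of 2048 becomes
 * i_forkoff = 256, and the attr fork data then starts 2048 bytes into the
 * inode literal area.
 */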
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd, &tp);
	if (error)
		return error;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);

	ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	logflags = 0;
	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_has_attr(mp) ||
	    (!xfs_has_attr2(mp) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_has_attr(mp)) {
			xfs_add_attr(mp);
			log_sb = true;
		}
		if (!xfs_has_attr2(mp) && version == 2) {
			xfs_add_attr2(mp);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Internal and external extent tree search functions.
 */

struct xfs_iread_state {
	struct xfs_iext_cursor	icur;
	xfs_extnum_t		loaded;
};
/* Stuff every bmbt record from this block into the incore extent map. */
static int
xfs_iread_bmbt_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xfs_iread_state	*ir = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_inode	*ip = cur->bc_ino.ip;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;
	struct xfs_bmbt_rec	*frp;
	xfs_extnum_t		num_recs;
	xfs_extnum_t		j;
	int			whichfork = cur->bc_ino.whichfork;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	block = xfs_btree_get_block(cur, level, &bp);

	/* Abort if we find more records than nextents. */
	num_recs = xfs_btree_get_numrecs(block);
	if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
		xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
				(unsigned long long)ip->i_ino);
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
				sizeof(*block), __this_address);
		return -EFSCORRUPTED;
	}

	/* Copy records into the incore cache. */
	frp = XFS_BMBT_REC_ADDR(mp, block, 1);
	for (j = 0; j < num_recs; j++, frp++, ir->loaded++) {
		struct xfs_bmbt_irec	new;
		xfs_failaddr_t		fa;

		xfs_bmbt_disk_get_all(frp, &new);
		fa = xfs_bmap_validate_extent(ip, whichfork, &new);
		if (fa) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					"xfs_iread_extents(2)", frp,
					sizeof(*frp), fa);
			return -EFSCORRUPTED;
		}
		xfs_iext_insert(ip, &ir->icur, &new,
				xfs_bmap_fork_to_state(whichfork));
		trace_xfs_read_extent(ip, &ir->icur,
				xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
		xfs_iext_next(ifp, &ir->icur);
	}

	return 0;
}
/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_iread_state	ir;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_cur	*cur;
	int			error;

	if (!xfs_need_iread_extents(ifp))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	ir.loaded = 0;
	xfs_iext_first(ifp, &ir.icur);
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	error = xfs_btree_visit_blocks(cur, xfs_iread_bmbt_block,
			XFS_BTREE_VISIT_RECORDS, &ir);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out;

	if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(ir.loaded == xfs_iext_count(ifp));
	return 0;
out:
	xfs_iext_destroy(ifp);
	return error;
}
/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	ASSERT(xfs_ifork_has_extents(ifp));

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
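
/*
 * Worked example (hypothetical extents, assuming *first_unused enters as
 * 0): with len = 4 and extents at file offsets [0, 10) and [12, 20), the
 * 2-block hole at offset 10 is too small and is skipped, so max ends up at
 * 20 and the first block past EOF is reported.  With extents [0, 10) and
 * [16, 20) instead, the 6-block hole at offset 10 passes both checks and
 * 10 is returned.
 */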
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		ASSERT(0);
		return -EFSCORRUPTED;
	}

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}
int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	error = xfs_iread_extents(tp, ip, whichfork);
	if (error)
		return error;

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
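
/*
 * Example (hypothetical numbers): if the last extent covers file offsets
 * [100, 110) and bma->offset is 110, the allocation starts exactly at EOF
 * and aeof is set.  If that last extent is instead a delalloc reservation
 * and bma->offset is 105, the write lands inside the reservation and aeof
 * is also set, enabling stripe alignment of the eventual allocation.
 */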
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
		return -EFSCORRUPTED;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_mount	*mp = bma->ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp = 0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_bmbt_irec	old;

	ASSERT(whichfork != XFS_ATTR_FORK);
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
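
	/*
	 * Illustration of the all-three-contiguous clause above: when the
	 * new extent fills the whole delalloc record and merges with both
	 * neighbors, LEFT, new and RIGHT collapse into one record, so the
	 * sum of all three lengths, not just a pairwise sum, must stay
	 * within MAXEXTLEN or the merge would produce an unrepresentable
	 * extent.
	 */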
	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		ifp->if_nextents--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_ino.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;
	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *			      new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		ifp->if_nextents++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP))
		xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (da_new != da_old)
		xfs_mod_delalloc(mp, (int64_t)da_new - da_old);

	if (bma->cur) {
		da_new += bma->cur->bc_ino.allocated;
		bma->cur->bc_ino.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_btree_cur	**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	struct xfs_btree_cur	*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval = 0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		ifp->if_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);
		ifp->if_nextents--;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &RIGHT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &RIGHT);
			if (error)
				goto done;
		}
		break;
	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_next(ifp, icur);
		xfs_iext_insert(ip, icur, new, state);
		ifp->if_nextents++;

		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 0)) {
				error = -EFSCORRUPTED;
				goto done;
			}
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			if (XFS_IS_CORRUPT(mp, i != 1)) {
				error = -EFSCORRUPTED;
				goto done;
			}
		}
		break;
2359 * Setting the middle part of a previous oldext extent to
2360 * newext. Contiguity is impossible here.
2361 * One extent becomes three extents.
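* For illustration (hypothetical numbers): converting the middle blocks
* [3,6) of an unwritten extent [0,10) leaves [0,3) unwritten, makes
* [3,6) the new written extent, and leaves [6,10) unwritten, which is
* why two records are inserted and if_nextents grows by two below.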
2364 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2367 r[1].br_startoff = new_endoff;
2368 r[1].br_blockcount =
2369 old.br_startoff + old.br_blockcount - new_endoff;
2370 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2371 r[1].br_state = PREV.br_state;
2373 xfs_iext_update_extent(ip, state, icur, &PREV);
2374 xfs_iext_next(ifp, icur);
2375 xfs_iext_insert(ip, icur, &r[1], state);
2376 xfs_iext_insert(ip, icur, &r[0], state);
2377 ifp->if_nextents += 2;
2380 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2382 rval = XFS_ILOG_CORE;
2383 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2386 if (XFS_IS_CORRUPT(mp, i != 1)) {
2387 error = -EFSCORRUPTED;
2390 /* new right extent - oldext */
2391 error = xfs_bmbt_update(cur, &r[1]);
2394 /* new left extent - oldext */
2395 cur->bc_rec.b = PREV;
2396 if ((error = xfs_btree_insert(cur, &i)))
2398 if (XFS_IS_CORRUPT(mp, i != 1)) {
2399 error = -EFSCORRUPTED;
2403 * Reset the cursor to the position of the new extent
2404 * we are about to insert as we can't trust it after
2405 * the previous insert.
2407 error = xfs_bmbt_lookup_eq(cur, new, &i);
2410 if (XFS_IS_CORRUPT(mp, i != 0)) {
2411 error = -EFSCORRUPTED;
2414 /* new middle extent - newext */
2415 if ((error = xfs_btree_insert(cur, &i)))
2417 if (XFS_IS_CORRUPT(mp, i != 1)) {
2418 error = -EFSCORRUPTED;
2424 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2425 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2426 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2427 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2428 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2429 case BMAP_LEFT_CONTIG:
2430 case BMAP_RIGHT_CONTIG:
2432 * These cases are all impossible.
2437 /* update reverse mappings */
2438 xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2440 /* convert to a btree if necessary */
2441 if (xfs_bmap_needs_btree(ip, whichfork)) {
2442 int tmp_logflags; /* partial log flag return val */
2444 ASSERT(cur == NULL);
2445 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2446 &tmp_logflags, whichfork);
2447 *logflagsp |= tmp_logflags;
2452 /* clear out the allocated field, done with it now in any case. */
2453 if (cur) {
2454 cur->bc_ino.allocated = 0;
2455 *curp = cur;
2456 }
2458 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2468 * Convert a hole to a delayed allocation.
2471 xfs_bmap_add_extent_hole_delay(
2472 xfs_inode_t *ip, /* incore inode pointer */
2474 struct xfs_iext_cursor *icur,
2475 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2477 struct xfs_ifork *ifp; /* inode fork pointer */
2478 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2479 xfs_filblks_t newlen=0; /* new indirect size */
2480 xfs_filblks_t oldlen=0; /* old indirect size */
2481 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2482 int state = xfs_bmap_fork_to_state(whichfork);
2483 xfs_filblks_t temp; /* temp for indirect calculations */
2485 ifp = XFS_IFORK_PTR(ip, whichfork);
2486 ASSERT(isnullstartblock(new->br_startblock));
2489 * Check and set flags if this segment has a left neighbor
2491 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2492 state |= BMAP_LEFT_VALID;
2493 if (isnullstartblock(left.br_startblock))
2494 state |= BMAP_LEFT_DELAY;
2498 * Check and set flags if the current (right) segment exists.
2499 * If it doesn't exist, we're converting the hole at end-of-file.
2501 if (xfs_iext_get_extent(ifp, icur, &right)) {
2502 state |= BMAP_RIGHT_VALID;
2503 if (isnullstartblock(right.br_startblock))
2504 state |= BMAP_RIGHT_DELAY;
2508 * Set contiguity flags on the left and right neighbors.
2509 * Don't let extents get too large, even if the pieces are contiguous.
2511 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2512 left.br_startoff + left.br_blockcount == new->br_startoff &&
2513 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2514 state |= BMAP_LEFT_CONTIG;
2516 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2517 new->br_startoff + new->br_blockcount == right.br_startoff &&
2518 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2519 (!(state & BMAP_LEFT_CONTIG) ||
2520 (left.br_blockcount + new->br_blockcount +
2521 right.br_blockcount <= MAXEXTLEN)))
2522 state |= BMAP_RIGHT_CONTIG;
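/*
 * A minimal sketch of the case selection below (hypothetical numbers):
 * with delalloc neighbours left = [0,4) and right = [6,9) and a new
 * delalloc extent [4,6), both CONTIG flags are set above and the three
 * records collapse into a single 9-block reservation [0,9); the
 * MAXEXTLEN checks refuse any merge that would overflow one record.
 */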
2525 * Switch out based on the contiguity flags.
2527 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2528 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2530 * New allocation is contiguous with delayed allocations
2531 * on the left and on the right.
2532 * Merge all three into a single extent record.
2534 temp = left.br_blockcount + new->br_blockcount +
2535 right.br_blockcount;
2537 oldlen = startblockval(left.br_startblock) +
2538 startblockval(new->br_startblock) +
2539 startblockval(right.br_startblock);
2540 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2542 left.br_startblock = nullstartblock(newlen);
2543 left.br_blockcount = temp;
2545 xfs_iext_remove(ip, icur, state);
2546 xfs_iext_prev(ifp, icur);
2547 xfs_iext_update_extent(ip, state, icur, &left);
2550 case BMAP_LEFT_CONTIG:
2552 * New allocation is contiguous with a delayed allocation
2553 * on the left.
2554 * Merge the new allocation with the left neighbor.
2556 temp = left.br_blockcount + new->br_blockcount;
2558 oldlen = startblockval(left.br_startblock) +
2559 startblockval(new->br_startblock);
2560 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2562 left.br_blockcount = temp;
2563 left.br_startblock = nullstartblock(newlen);
2565 xfs_iext_prev(ifp, icur);
2566 xfs_iext_update_extent(ip, state, icur, &left);
2569 case BMAP_RIGHT_CONTIG:
2571 * New allocation is contiguous with a delayed allocation
2572 * on the right.
2573 * Merge the new allocation with the right neighbor.
2575 temp = new->br_blockcount + right.br_blockcount;
2576 oldlen = startblockval(new->br_startblock) +
2577 startblockval(right.br_startblock);
2578 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2580 right.br_startoff = new->br_startoff;
2581 right.br_startblock = nullstartblock(newlen);
2582 right.br_blockcount = temp;
2583 xfs_iext_update_extent(ip, state, icur, &right);
2588 * New allocation is not contiguous with another
2589 * delayed allocation.
2590 * Insert a new entry.
2592 oldlen = newlen = 0;
2593 xfs_iext_insert(ip, icur, new, state);
2596 if (oldlen != newlen) {
2597 ASSERT(oldlen > newlen);
2598 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2599 false);
2601 * Nothing to do for disk quota accounting here.
2603 xfs_mod_delalloc(ip->i_mount, (int64_t)newlen - oldlen);
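/*
 * Worked example of the indlen rebalancing above (hypothetical
 * numbers): merging two 8-block reservations that each hid 2 indirect
 * blocks gives oldlen = 4; if xfs_bmap_worst_indlen() says the merged
 * 16-block extent only needs 3, newlen = 3 and the spare block is
 * returned to the free-block counter.
 */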
2608 * Convert a hole to a real allocation.
2610 STATIC int /* error */
2611 xfs_bmap_add_extent_hole_real(
2612 struct xfs_trans *tp,
2613 struct xfs_inode *ip,
2615 struct xfs_iext_cursor *icur,
2616 struct xfs_btree_cur **curp,
2617 struct xfs_bmbt_irec *new,
2621 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2622 struct xfs_mount *mp = ip->i_mount;
2623 struct xfs_btree_cur *cur = *curp;
2624 int error; /* error return value */
2625 int i; /* temp state */
2626 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2627 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2628 int rval=0; /* return value (logging flags) */
2629 int state = xfs_bmap_fork_to_state(whichfork);
2630 struct xfs_bmbt_irec old;
2632 ASSERT(!isnullstartblock(new->br_startblock));
2633 ASSERT(!cur || !(cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
2635 XFS_STATS_INC(mp, xs_add_exlist);
2638 * Check and set flags if this segment has a left neighbor.
2640 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2641 state |= BMAP_LEFT_VALID;
2642 if (isnullstartblock(left.br_startblock))
2643 state |= BMAP_LEFT_DELAY;
2647 * Check and set flags if this segment has a current value.
2648 * Not true if we're inserting into the "hole" at eof.
2650 if (xfs_iext_get_extent(ifp, icur, &right)) {
2651 state |= BMAP_RIGHT_VALID;
2652 if (isnullstartblock(right.br_startblock))
2653 state |= BMAP_RIGHT_DELAY;
2657 * We're inserting a real allocation between "left" and "right".
2658 * Set the contiguity flags. Don't let extents get too large.
2660 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2661 left.br_startoff + left.br_blockcount == new->br_startoff &&
2662 left.br_startblock + left.br_blockcount == new->br_startblock &&
2663 left.br_state == new->br_state &&
2664 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2665 state |= BMAP_LEFT_CONTIG;
2667 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2668 new->br_startoff + new->br_blockcount == right.br_startoff &&
2669 new->br_startblock + new->br_blockcount == right.br_startblock &&
2670 new->br_state == right.br_state &&
2671 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2672 (!(state & BMAP_LEFT_CONTIG) ||
2673 left.br_blockcount + new->br_blockcount +
2674 right.br_blockcount <= MAXEXTLEN))
2675 state |= BMAP_RIGHT_CONTIG;
2679 * Select which case we're in here, and implement it.
2681 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2682 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2684 * New allocation is contiguous with real allocations on the
2685 * left and on the right.
2686 * Merge all three into a single extent record.
2688 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2690 xfs_iext_remove(ip, icur, state);
2691 xfs_iext_prev(ifp, icur);
2692 xfs_iext_update_extent(ip, state, icur, &left);
2696 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2698 rval = XFS_ILOG_CORE;
2699 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2702 if (XFS_IS_CORRUPT(mp, i != 1)) {
2703 error = -EFSCORRUPTED;
2706 error = xfs_btree_delete(cur, &i);
2709 if (XFS_IS_CORRUPT(mp, i != 1)) {
2710 error = -EFSCORRUPTED;
2713 error = xfs_btree_decrement(cur, 0, &i);
2716 if (XFS_IS_CORRUPT(mp, i != 1)) {
2717 error = -EFSCORRUPTED;
2720 error = xfs_bmbt_update(cur, &left);
2726 case BMAP_LEFT_CONTIG:
2728 * New allocation is contiguous with a real allocation
2729 * on the left.
2730 * Merge the new allocation with the left neighbor.
2733 left.br_blockcount += new->br_blockcount;
2735 xfs_iext_prev(ifp, icur);
2736 xfs_iext_update_extent(ip, state, icur, &left);
2739 rval = xfs_ilog_fext(whichfork);
2742 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2745 if (XFS_IS_CORRUPT(mp, i != 1)) {
2746 error = -EFSCORRUPTED;
2749 error = xfs_bmbt_update(cur, &left);
2755 case BMAP_RIGHT_CONTIG:
2757 * New allocation is contiguous with a real allocation
2758 * on the right.
2759 * Merge the new allocation with the right neighbor.
2763 right.br_startoff = new->br_startoff;
2764 right.br_startblock = new->br_startblock;
2765 right.br_blockcount += new->br_blockcount;
2766 xfs_iext_update_extent(ip, state, icur, &right);
2769 rval = xfs_ilog_fext(whichfork);
2772 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2775 if (XFS_IS_CORRUPT(mp, i != 1)) {
2776 error = -EFSCORRUPTED;
2779 error = xfs_bmbt_update(cur, &right);
2787 * New allocation is not contiguous with another
2788 * real allocation.
2789 * Insert a new entry.
2791 xfs_iext_insert(ip, icur, new, state);
2795 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2797 rval = XFS_ILOG_CORE;
2798 error = xfs_bmbt_lookup_eq(cur, new, &i);
2801 if (XFS_IS_CORRUPT(mp, i != 0)) {
2802 error = -EFSCORRUPTED;
2805 error = xfs_btree_insert(cur, &i);
2808 if (XFS_IS_CORRUPT(mp, i != 1)) {
2809 error = -EFSCORRUPTED;
2816 /* add reverse mapping unless caller opted out */
2817 if (!(flags & XFS_BMAPI_NORMAP))
2818 xfs_rmap_map_extent(tp, ip, whichfork, new);
2820 /* convert to a btree if necessary */
2821 if (xfs_bmap_needs_btree(ip, whichfork)) {
2822 int tmp_logflags; /* partial log flag return val */
2824 ASSERT(cur == NULL);
2825 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2826 &tmp_logflags, whichfork);
2827 *logflagsp |= tmp_logflags;
2833 /* clear out the allocated field, done with it now in any case. */
2834 if (cur)
2835 cur->bc_ino.allocated = 0;
2837 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2844 * Functions used in the extent read, allocate and remove paths
2848 * Adjust the size of the new extent based on i_extsize and rt extsize.
2851 xfs_bmap_extsize_align(
2853 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2854 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2855 xfs_extlen_t extsz, /* align to this extent size */
2856 int rt, /* is this a realtime inode? */
2857 int eof, /* is extent at end-of-file? */
2858 int delay, /* creating delalloc extent? */
2859 int convert, /* overwriting unwritten extent? */
2860 xfs_fileoff_t *offp, /* in/out: aligned offset */
2861 xfs_extlen_t *lenp) /* in/out: aligned length */
2863 xfs_fileoff_t orig_off; /* original offset */
2864 xfs_extlen_t orig_alen; /* original length */
2865 xfs_fileoff_t orig_end; /* original off+len */
2866 xfs_fileoff_t nexto; /* next file offset */
2867 xfs_fileoff_t prevo; /* previous file offset */
2868 xfs_fileoff_t align_off; /* temp for offset */
2869 xfs_extlen_t align_alen; /* temp for length */
2870 xfs_extlen_t temp; /* temp for calculations */
2875 orig_off = align_off = *offp;
2876 orig_alen = align_alen = *lenp;
2877 orig_end = orig_off + orig_alen;
2880 * If this request overlaps an existing extent, then don't
2881 * attempt to perform any additional alignment.
2883 if (!delay && !eof &&
2884 (orig_off >= gotp->br_startoff) &&
2885 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2890 * If the file offset is unaligned vs. the extent size
2891 * we need to align it. This will be possible unless
2892 * the file was previously written with a kernel that didn't
2893 * perform this alignment, or if a truncate shot us in the
2894 * foot.
2896 div_u64_rem(orig_off, extsz, &temp);
2897 if (temp) {
2898 align_alen += temp;
2899 align_off -= temp;
2900 }
2902 /* Same adjustment for the end of the requested area. */
2903 temp = (align_alen % extsz);
2904 if (temp)
2905 align_alen += extsz - temp;
2908 * For large extent hint sizes, the aligned extent might be larger than
2909 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2910 * the length back under MAXEXTLEN. The outer allocation loops handle
2911 * short allocation just fine, so it is safe to do this. We only want to
2912 * do it when we are forced to, though, because it means more allocation
2913 * operations are required.
2915 while (align_alen > MAXEXTLEN)
2916 align_alen -= extsz;
2917 ASSERT(align_alen <= MAXEXTLEN);
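/*
 * Worked example of the rounding above (hypothetical numbers): with
 * extsz = 4, a request for offset 5, length 3 is rounded down at the
 * start by temp = 1 to offset 4, length 4; 4 % 4 == 0 so no tail
 * padding is added, and the aligned range [4,8) still covers the
 * original [5,8).
 */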
2920 * If the previous block overlaps with this proposed allocation
2921 * then move the start forward without adjusting the length.
2923 if (prevp->br_startoff != NULLFILEOFF) {
2924 if (prevp->br_startblock == HOLESTARTBLOCK)
2925 prevo = prevp->br_startoff;
2927 prevo = prevp->br_startoff + prevp->br_blockcount;
2930 if (align_off != orig_off && align_off < prevo)
2933 * If the next block overlaps with this proposed allocation
2934 * then move the start back without adjusting the length,
2935 * but not before offset 0.
2936 * This may of course make the start overlap previous block,
2937 * and if we hit the offset 0 limit then the next block
2938 * can still overlap too.
2940 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2941 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2942 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2943 nexto = gotp->br_startoff + gotp->br_blockcount;
2945 nexto = gotp->br_startoff;
2947 nexto = NULLFILEOFF;
2949 align_off + align_alen != orig_end &&
2950 align_off + align_alen > nexto)
2951 align_off = nexto > align_alen ? nexto - align_alen : 0;
2953 * If we're now overlapping the next or previous extent that
2954 * means we can't fit an extsz piece in this hole. Just move
2955 * the start forward to the first valid spot and set
2956 * the length so we hit the end.
2958 if (align_off != orig_off && align_off < prevo)
2960 if (align_off + align_alen != orig_end &&
2961 align_off + align_alen > nexto &&
2962 nexto != NULLFILEOFF) {
2963 ASSERT(nexto > prevo);
2964 align_alen = nexto - align_off;
2968 * If realtime, and the result isn't a multiple of the realtime
2969 * extent size we need to remove blocks until it is.
2971 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2973 * We're not covering the original request, or
2974 * we won't be able to once we fix the length.
2976 if (orig_off < align_off ||
2977 orig_end > align_off + align_alen ||
2978 align_alen - temp < orig_alen)
2981 * Try to fix it by moving the start up.
2983 if (align_off + temp <= orig_off) {
2988 * Try to fix it by moving the end in.
2990 else if (align_off + align_alen - temp >= orig_end)
2993 * Set the start to the minimum then trim the length.
2996 align_alen -= orig_off - align_off;
2997 align_off = orig_off;
2998 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3001 * Result doesn't cover the request, fail it.
3003 if (orig_off < align_off || orig_end > align_off + align_alen)
3006 ASSERT(orig_off >= align_off);
3007 /* see MAXEXTLEN handling above */
3008 ASSERT(orig_end <= align_off + align_alen ||
3009 align_alen + extsz > MAXEXTLEN);
3013 if (!eof && gotp->br_startoff != NULLFILEOFF)
3014 ASSERT(align_off + align_alen <= gotp->br_startoff);
3015 if (prevp->br_startoff != NULLFILEOFF)
3016 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3024 #define XFS_ALLOC_GAP_UNITS 4
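/*
 * Informal reading of this constant, not from the original source: a
 * neighbouring extent is only chased as a placement hint when the gap
 * between it and the request is at most four times the requested
 * length, e.g. an 8-block allocation will follow a neighbour across a
 * gap of up to 32 blocks before giving up on it.
 */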
3028 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3030 xfs_fsblock_t adjust; /* adjustment to block numbers */
3031 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3032 xfs_mount_t *mp; /* mount point structure */
3033 int nullfb; /* true if ap->firstblock isn't set */
3034 int rt; /* true if inode is realtime */
3036 #define ISVALID(x,y) \
3038 (x) < mp->m_sb.sb_rblocks : \
3039 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3040 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3041 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3043 mp = ap->ip->i_mount;
3044 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3045 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3046 (ap->datatype & XFS_ALLOC_USERDATA);
3047 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3048 ap->tp->t_firstblock);
3050 * If allocating at eof, and there's a previous real block,
3051 * try to use its last block as our starting point.
3053 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3054 !isnullstartblock(ap->prev.br_startblock) &&
3055 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3056 ap->prev.br_startblock)) {
3057 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3059 * Adjust for the gap between prevp and us.
3061 adjust = ap->offset -
3062 (ap->prev.br_startoff + ap->prev.br_blockcount);
3064 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3065 ap->blkno += adjust;
3068 * If not at eof, then compare the two neighbor blocks.
3069 * Figure out whether either one gives us a good starting point,
3070 * and pick the better one.
3072 else if (!ap->eof) {
3073 xfs_fsblock_t gotbno; /* right side block number */
3074 xfs_fsblock_t gotdiff=0; /* right side difference */
3075 xfs_fsblock_t prevbno; /* left side block number */
3076 xfs_fsblock_t prevdiff=0; /* left side difference */
3079 * If there's a previous (left) block, select a requested
3080 * start block based on it.
3082 if (ap->prev.br_startoff != NULLFILEOFF &&
3083 !isnullstartblock(ap->prev.br_startblock) &&
3084 (prevbno = ap->prev.br_startblock +
3085 ap->prev.br_blockcount) &&
3086 ISVALID(prevbno, ap->prev.br_startblock)) {
3088 * Calculate gap to end of previous block.
3090 adjust = prevdiff = ap->offset -
3091 (ap->prev.br_startoff +
3092 ap->prev.br_blockcount);
3094 * Figure the startblock based on the previous block's
3095 * end and the gap size.
3097 * If the gap is large relative to the piece we're
3098 * allocating, or using it gives us an invalid block
3099 * number, then just use the end of the previous block.
3101 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3102 ISVALID(prevbno + prevdiff,
3103 ap->prev.br_startblock))
3104 prevbno += adjust;
3105 else
3106 prevdiff += adjust;
3108 * If the firstblock forbids it, can't use it,
3109 * must use default.
3111 if (!rt && !nullfb &&
3112 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3113 prevbno = NULLFSBLOCK;
3116 * No previous block or can't follow it, just default.
3119 prevbno = NULLFSBLOCK;
3121 * If there's a following (right) block, select a requested
3122 * start block based on it.
3124 if (!isnullstartblock(ap->got.br_startblock)) {
3126 * Calculate gap to start of next block.
3128 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3130 * Figure the startblock based on the next block's
3131 * start and the gap size.
3133 gotbno = ap->got.br_startblock;
3136 * If the gap is large relative to the piece we're
3137 * allocating, or using it gives us an invalid block
3138 * number, then just use the start of the next block
3139 * offset by our length.
3141 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3142 ISVALID(gotbno - gotdiff, gotbno))
3143 gotbno -= adjust;
3144 else if (ISVALID(gotbno - ap->length, gotbno)) {
3145 gotbno -= ap->length;
3146 gotdiff += adjust - ap->length;
3147 } else
3148 gotdiff += adjust;
3150 * If the firstblock forbids it, can't use it,
3151 * must use default.
3153 if (!rt && !nullfb &&
3154 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3155 gotbno = NULLFSBLOCK;
3158 * No next block, just default.
3161 gotbno = NULLFSBLOCK;
3163 * If both valid, pick the better one, else the only good
3164 * one, else ap->blkno is already set (to 0 or the inode block).
3166 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3167 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3168 else if (prevbno != NULLFSBLOCK)
3169 ap->blkno = prevbno;
3170 else if (gotbno != NULLFSBLOCK)
3177 xfs_bmap_longest_free_extent(
3178 struct xfs_trans *tp,
3183 struct xfs_mount *mp = tp->t_mountp;
3184 struct xfs_perag *pag;
3185 xfs_extlen_t longest;
3188 pag = xfs_perag_get(mp, ag);
3189 if (!pag->pagf_init) {
3190 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3192 /* Couldn't lock the AGF, so skip this AG. */
3193 if (error == -EAGAIN) {
3201 longest = xfs_alloc_longest_free_extent(pag,
3202 xfs_alloc_min_freelist(mp, pag),
3203 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3204 if (*blen < longest)
3213 xfs_bmap_select_minlen(
3214 struct xfs_bmalloca *ap,
3215 struct xfs_alloc_arg *args,
3219 if (notinit || *blen < ap->minlen) {
3221 * Since we did a BUF_TRYLOCK above, it is possible that
3222 * there is space for this request.
3224 args->minlen = ap->minlen;
3225 } else if (*blen < args->maxlen) {
3227 * If the best seen length is less than the request length,
3228 * use the best as the minimum.
3230 args->minlen = *blen;
3233 * Otherwise we've seen an extent as big as maxlen, use that
3234 * as the minimum.
3236 args->minlen = args->maxlen;
3241 xfs_bmap_btalloc_nullfb(
3242 struct xfs_bmalloca *ap,
3243 struct xfs_alloc_arg *args,
3246 struct xfs_mount *mp = ap->ip->i_mount;
3247 xfs_agnumber_t ag, startag;
3251 args->type = XFS_ALLOCTYPE_START_BNO;
3252 args->total = ap->total;
3254 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3255 if (startag == NULLAGNUMBER)
3258 while (*blen < args->maxlen) {
3259 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3264 if (++ag == mp->m_sb.sb_agcount)
3270 xfs_bmap_select_minlen(ap, args, blen, notinit);
3275 xfs_bmap_btalloc_filestreams(
3276 struct xfs_bmalloca *ap,
3277 struct xfs_alloc_arg *args,
3280 struct xfs_mount *mp = ap->ip->i_mount;
3285 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3286 args->total = ap->total;
3288 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3289 if (ag == NULLAGNUMBER)
3292 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3296 if (*blen < args->maxlen) {
3297 error = xfs_filestream_new_ag(ap, &ag);
3301 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3308 xfs_bmap_select_minlen(ap, args, blen, notinit);
3311 * Set the failure fallback case to look in the selected AG as stream
3312 * may have moved.
3314 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3318 /* Update all inode and quota accounting for the allocation we just did. */
3320 xfs_bmap_btalloc_accounting(
3321 struct xfs_bmalloca *ap,
3322 struct xfs_alloc_arg *args)
3324 if (ap->flags & XFS_BMAPI_COWFORK) {
3326 * COW fork blocks are in-core only and thus are treated as
3327 * in-core quota reservation (like delalloc blocks) even when
3328 * converted to real blocks. The quota reservation is not
3329 * accounted to disk until blocks are remapped to the data
3330 * fork. So if these blocks were previously delalloc, we
3331 * already have quota reservation and there's nothing to do
3332 * yet.
3335 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3340 * Otherwise, we've allocated blocks in a hole. The transaction
3341 * has acquired in-core quota reservation for this extent.
3342 * Rather than account these as real blocks, however, we reduce
3343 * the transaction quota reservation based on the allocation.
3344 * This essentially transfers the transaction quota reservation
3345 * to that of a delalloc extent.
3347 ap->ip->i_delayed_blks += args->len;
3348 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3349 -(long)args->len);
3350 return;
3353 /* data/attr fork only */
3354 ap->ip->i_nblocks += args->len;
3355 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3357 ap->ip->i_delayed_blks -= args->len;
3358 xfs_mod_delalloc(ap->ip->i_mount, -(int64_t)args->len);
3360 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3361 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3362 args->len);
3366 xfs_bmap_compute_alignments(
3367 struct xfs_bmalloca *ap,
3368 struct xfs_alloc_arg *args)
3370 struct xfs_mount *mp = args->mp;
3371 xfs_extlen_t align = 0; /* minimum allocation alignment */
3372 int stripe_align = 0;
3374 /* stripe alignment for allocation is determined by mount parameters */
3375 if (mp->m_swidth && xfs_has_swalloc(mp))
3376 stripe_align = mp->m_swidth;
3377 else if (mp->m_dalign)
3378 stripe_align = mp->m_dalign;
3380 if (ap->flags & XFS_BMAPI_COWFORK)
3381 align = xfs_get_cowextsz_hint(ap->ip);
3382 else if (ap->datatype & XFS_ALLOC_USERDATA)
3383 align = xfs_get_extsz_hint(ap->ip);
3385 if (xfs_bmap_extsize_align(mp, &ap->got, &ap->prev, align, 0,
3386 ap->eof, 0, ap->conv, &ap->offset,
3392 /* apply extent size hints if obtained earlier */
3395 div_u64_rem(ap->offset, args->prod, &args->mod);
3397 args->mod = args->prod - args->mod;
3398 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3402 args->prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3403 div_u64_rem(ap->offset, args->prod, &args->mod);
3405 args->mod = args->prod - args->mod;
3408 return stripe_align;
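/*
 * Example of the precedence above (hypothetical mount options): with
 * sunit = 8 and swidth = 64 plus the "swalloc" mount option,
 * stripe_align is the 64-block stripe width; without "swalloc" it is
 * the 8-block stripe unit, and any extsize/cowextsize hint is applied
 * separately through xfs_bmap_extsize_align().
 */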
3412 xfs_bmap_process_allocated_extent(
3413 struct xfs_bmalloca *ap,
3414 struct xfs_alloc_arg *args,
3415 xfs_fileoff_t orig_offset,
3416 xfs_extlen_t orig_length)
3420 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3423 * check the allocation happened at the same or higher AG than
3424 * the first block that was allocated.
3426 ASSERT(nullfb ||
3427 XFS_FSB_TO_AGNO(args->mp, ap->tp->t_firstblock) <=
3428 XFS_FSB_TO_AGNO(args->mp, args->fsbno));
3430 ap->blkno = args->fsbno;
3432 ap->tp->t_firstblock = args->fsbno;
3433 ap->length = args->len;
3435 * If the extent size hint is active, we tried to round the
3436 * caller's allocation request offset down to extsz and the
3437 * length up to another extsz boundary. If we found a free
3438 * extent we mapped it in starting at this new offset. If the
3439 * newly mapped space isn't long enough to cover any of the
3440 * range of offsets that was originally requested, move the
3441 * mapping up so that we can fill as much of the caller's
3442 * original request as possible. Free space is apparently
3443 * very fragmented so we're unlikely to be able to satisfy the
3444 * hints anyway.
3446 if (ap->length <= orig_length)
3447 ap->offset = orig_offset;
3448 else if (ap->offset + ap->length < orig_offset + orig_length)
3449 ap->offset = orig_offset + orig_length - ap->length;
3450 xfs_bmap_btalloc_accounting(ap, args);
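/*
 * Sketch of the offset fixup above (hypothetical numbers): a request
 * for [10,30) that an extent size hint widened to [0,32) may come back
 * as a short 8-block allocation; since 8 <= the original 20-block
 * length, the mapping is moved up to start at offset 10 instead of 0,
 * where it would have satisfied none of the caller's request.
 */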
3455 xfs_bmap_exact_minlen_extent_alloc(
3456 struct xfs_bmalloca *ap)
3458 struct xfs_mount *mp = ap->ip->i_mount;
3459 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3460 xfs_fileoff_t orig_offset;
3461 xfs_extlen_t orig_length;
3466 if (ap->minlen != 1) {
3467 ap->blkno = NULLFSBLOCK;
3472 orig_offset = ap->offset;
3473 orig_length = ap->length;
3475 args.alloc_minlen_only = 1;
3477 xfs_bmap_compute_alignments(ap, &args);
3479 if (ap->tp->t_firstblock == NULLFSBLOCK) {
3481 * Unlike the longest extent available in an AG, we don't track
3482 * the length of an AG's shortest extent.
3483 * XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT is a debug only knob and
3484 * hence we can afford to start traversing from the 0th AG since
3485 * we need not be concerned about a drop in performance in
3486 * "debug only" code paths.
3488 ap->blkno = XFS_AGB_TO_FSB(mp, 0, 0);
3490 ap->blkno = ap->tp->t_firstblock;
3493 args.fsbno = ap->blkno;
3494 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3495 args.type = XFS_ALLOCTYPE_FIRST_AG;
3496 args.minlen = args.maxlen = ap->minlen;
3497 args.total = ap->total;
3500 args.minalignslop = 0;
3502 args.minleft = ap->minleft;
3503 args.wasdel = ap->wasdel;
3504 args.resv = XFS_AG_RESV_NONE;
3505 args.datatype = ap->datatype;
3507 error = xfs_alloc_vextent(&args);
3511 if (args.fsbno != NULLFSBLOCK) {
3512 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3515 ap->blkno = NULLFSBLOCK;
3523 #define xfs_bmap_exact_minlen_extent_alloc(bma) (-EFSCORRUPTED)
3529 struct xfs_bmalloca *ap)
3531 struct xfs_mount *mp = ap->ip->i_mount;
3532 struct xfs_alloc_arg args = { .tp = ap->tp, .mp = mp };
3533 xfs_alloctype_t atype = 0;
3534 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3536 xfs_fileoff_t orig_offset;
3537 xfs_extlen_t orig_length;
3539 xfs_extlen_t nextminlen = 0;
3540 int nullfb; /* true if ap->firstblock isn't set */
3547 orig_offset = ap->offset;
3548 orig_length = ap->length;
3550 stripe_align = xfs_bmap_compute_alignments(ap, &args);
3552 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3553 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3554 ap->tp->t_firstblock);
3556 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3557 xfs_inode_is_filestream(ap->ip)) {
3558 ag = xfs_filestream_lookup_ag(ap->ip);
3559 ag = (ag != NULLAGNUMBER) ? ag : 0;
3560 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3562 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3565 ap->blkno = ap->tp->t_firstblock;
3567 xfs_bmap_adjacent(ap);
3570 * If allowed, use ap->blkno; otherwise must use firstblock since
3571 * it's in the right allocation group.
3573 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3576 ap->blkno = ap->tp->t_firstblock;
3578 * Normal allocation, done through xfs_alloc_vextent.
3580 tryagain = isaligned = 0;
3581 args.fsbno = ap->blkno;
3582 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3584 /* Trim the allocation back to the maximum an AG can fit. */
3585 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3589 * Search for an allocation group with a single extent large
3590 * enough for the request. If one isn't found, then adjust
3591 * the minimum allocation size to the largest space found.
3593 if ((ap->datatype & XFS_ALLOC_USERDATA) &&
3594 xfs_inode_is_filestream(ap->ip))
3595 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3597 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3600 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3601 if (xfs_inode_is_filestream(ap->ip))
3602 args.type = XFS_ALLOCTYPE_FIRST_AG;
3604 args.type = XFS_ALLOCTYPE_START_BNO;
3605 args.total = args.minlen = ap->minlen;
3607 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3608 args.total = ap->total;
3609 args.minlen = ap->minlen;
3613 * If we are not low on available data blocks, and the underlying
3614 * logical volume manager is a stripe, and the file offset is zero then
3615 * try to allocate data blocks on stripe unit boundary. NOTE: ap->aeof
3616 * is only set if the allocation length is >= the stripe unit and the
3617 * allocation offset is at the end of file.
3619 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3621 args.alignment = stripe_align;
3625 * Adjust minlen to try and preserve alignment if we
3626 * can't guarantee an aligned maxlen extent.
3628 if (blen > args.alignment &&
3629 blen <= args.maxlen + args.alignment)
3630 args.minlen = blen - args.alignment;
3631 args.minalignslop = 0;
3634 * First try an exact bno allocation.
3635 * If it fails then do a near or start bno
3636 * allocation with alignment turned on.
3640 args.type = XFS_ALLOCTYPE_THIS_BNO;
3643 * Compute the minlen+alignment for the
3644 * next case. Set slop so that the value
3645 * of minlen+alignment+slop doesn't go up
3646 * between the calls.
3648 if (blen > stripe_align && blen <= args.maxlen)
3649 nextminlen = blen - stripe_align;
3651 nextminlen = args.minlen;
3652 if (nextminlen + stripe_align > args.minlen + 1)
3653 args.minalignslop =
3654 nextminlen + stripe_align -
3655 args.minlen - 1;
3656 else
3657 args.minalignslop = 0;
3661 args.minalignslop = 0;
3663 args.minleft = ap->minleft;
3664 args.wasdel = ap->wasdel;
3665 args.resv = XFS_AG_RESV_NONE;
3666 args.datatype = ap->datatype;
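/*
 * Numeric sketch of the slop computed above (hypothetical numbers):
 * with stripe_align = 8, args.minlen = 10 and nextminlen = 12,
 * minalignslop = 12 + 8 - 10 - 1 = 9, so this unaligned THIS_BNO
 * attempt demands enough headroom that the aligned retry below, with
 * minlen = 12, can never need more space than was just checked for.
 */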
3668 error = xfs_alloc_vextent(&args);
3672 if (tryagain && args.fsbno == NULLFSBLOCK) {
3674 * Exact allocation failed. Now try with alignment
3675 * turned on.
3678 args.fsbno = ap->blkno;
3679 args.alignment = stripe_align;
3680 args.minlen = nextminlen;
3681 args.minalignslop = 0;
3683 if ((error = xfs_alloc_vextent(&args)))
3686 if (isaligned && args.fsbno == NULLFSBLOCK) {
3688 * allocation failed, so turn off alignment and
3689 * try again.
3692 args.fsbno = ap->blkno;
3694 if ((error = xfs_alloc_vextent(&args)))
3697 if (args.fsbno == NULLFSBLOCK && nullfb &&
3698 args.minlen > ap->minlen) {
3699 args.minlen = ap->minlen;
3700 args.type = XFS_ALLOCTYPE_START_BNO;
3701 args.fsbno = ap->blkno;
3702 if ((error = xfs_alloc_vextent(&args)))
3705 if (args.fsbno == NULLFSBLOCK && nullfb) {
3707 args.type = XFS_ALLOCTYPE_FIRST_AG;
3708 args.total = ap->minlen;
3709 if ((error = xfs_alloc_vextent(&args)))
3711 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3714 if (args.fsbno != NULLFSBLOCK) {
3715 xfs_bmap_process_allocated_extent(ap, &args, orig_offset,
3718 ap->blkno = NULLFSBLOCK;
3724 /* Trim extent to fit a logical block range. */
3727 struct xfs_bmbt_irec *irec,
3731 xfs_fileoff_t distance;
3732 xfs_fileoff_t end = bno + len;
3734 if (irec->br_startoff + irec->br_blockcount <= bno ||
3735 irec->br_startoff >= end) {
3736 irec->br_blockcount = 0;
3740 if (irec->br_startoff < bno) {
3741 distance = bno - irec->br_startoff;
3742 if (isnullstartblock(irec->br_startblock))
3743 irec->br_startblock = DELAYSTARTBLOCK;
3744 if (irec->br_startblock != DELAYSTARTBLOCK &&
3745 irec->br_startblock != HOLESTARTBLOCK)
3746 irec->br_startblock += distance;
3747 irec->br_startoff += distance;
3748 irec->br_blockcount -= distance;
3751 if (end < irec->br_startoff + irec->br_blockcount) {
3752 distance = irec->br_startoff + irec->br_blockcount - end;
3753 irec->br_blockcount -= distance;
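/*
 * Usage sketch (hypothetical values):
 *
 *	struct xfs_bmbt_irec irec = {
 *		.br_startoff = 10, .br_startblock = 100,
 *		.br_blockcount = 10,
 *	};
 *	xfs_trim_extent(&irec, 12, 4);
 *
 * trims 2 blocks off the front and 4 off the back, leaving
 * br_startoff = 12, br_startblock = 102, br_blockcount = 4.
 */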
3758 * Trim the returned map to the required bounds
3762 struct xfs_bmbt_irec *mval,
3763 struct xfs_bmbt_irec *got,
3771 if ((flags & XFS_BMAPI_ENTIRE) ||
3772 got->br_startoff + got->br_blockcount <= obno) {
3773 *mval = *got;
3774 if (isnullstartblock(got->br_startblock))
3775 mval->br_startblock = DELAYSTARTBLOCK;
3776 return;
3777 }
3781 ASSERT((*bno >= obno) || (n == 0));
3783 mval->br_startoff = *bno;
3784 if (isnullstartblock(got->br_startblock))
3785 mval->br_startblock = DELAYSTARTBLOCK;
3787 mval->br_startblock = got->br_startblock +
3788 (*bno - got->br_startoff);
3790 * Return the minimum of what we got and what we asked for, for
3791 * the length. We can use the len variable here because it is
3792 * modified below and we could have been there before coming
3793 * here if the first part of the allocation didn't overlap what
3794 * was asked for.
3796 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3797 got->br_blockcount - (*bno - got->br_startoff));
3798 mval->br_state = got->br_state;
3799 ASSERT(mval->br_blockcount <= len);
3804 * Update and validate the extent map to return
3807 xfs_bmapi_update_map(
3808 struct xfs_bmbt_irec **map,
3816 xfs_bmbt_irec_t *mval = *map;
3818 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3819 ((mval->br_startoff + mval->br_blockcount) <= end));
3820 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3821 (mval->br_startoff < obno));
3823 *bno = mval->br_startoff + mval->br_blockcount;
3825 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3826 /* update previous map with new information */
3827 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3828 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3829 ASSERT(mval->br_state == mval[-1].br_state);
3830 mval[-1].br_blockcount = mval->br_blockcount;
3831 mval[-1].br_state = mval->br_state;
3832 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3833 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3834 mval[-1].br_startblock != HOLESTARTBLOCK &&
3835 mval->br_startblock == mval[-1].br_startblock +
3836 mval[-1].br_blockcount &&
3837 mval[-1].br_state == mval->br_state) {
3838 ASSERT(mval->br_startoff ==
3839 mval[-1].br_startoff + mval[-1].br_blockcount);
3840 mval[-1].br_blockcount += mval->br_blockcount;
3841 } else if (*n > 0 &&
3842 mval->br_startblock == DELAYSTARTBLOCK &&
3843 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3844 mval->br_startoff ==
3845 mval[-1].br_startoff + mval[-1].br_blockcount) {
3846 mval[-1].br_blockcount += mval->br_blockcount;
3847 mval[-1].br_state = mval->br_state;
3848 } else if (!((*n == 0) &&
3849 ((mval->br_startoff + mval->br_blockcount) <=
3850 obno))) {
3851 mval++;
3852 (*n)++;
3853 }
3854 *map = mval;
3858 * Map file blocks to filesystem blocks without allocation.
3862 struct xfs_inode *ip,
3865 struct xfs_bmbt_irec *mval,
3869 struct xfs_mount *mp = ip->i_mount;
3870 int whichfork = xfs_bmapi_whichfork(flags);
3871 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3872 struct xfs_bmbt_irec got;
3875 struct xfs_iext_cursor icur;
3881 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
3882 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3884 if (WARN_ON_ONCE(!ifp))
3885 return -EFSCORRUPTED;
3887 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
3888 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
3889 return -EFSCORRUPTED;
3891 if (xfs_is_shutdown(mp))
3894 XFS_STATS_INC(mp, xs_blk_mapr);
3896 error = xfs_iread_extents(NULL, ip, whichfork);
3900 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3905 while (bno < end && n < *nmap) {
3906 /* Reading past eof, act as though there's a hole up to end. */
3907 if (eof)
3908 got.br_startoff = end;
3909 if (got.br_startoff > bno) {
3910 /* Reading in a hole. */
3911 mval->br_startoff = bno;
3912 mval->br_startblock = HOLESTARTBLOCK;
3913 mval->br_blockcount =
3914 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3915 mval->br_state = XFS_EXT_NORM;
3916 bno += mval->br_blockcount;
3917 len -= mval->br_blockcount;
3918 mval++;
3919 n++;
3920 continue;
3923 /* set up the extent map to return. */
3924 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3925 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3927 /* If we're done, stop now. */
3928 if (bno >= end || n >= *nmap)
3931 /* Else go on to the next record. */
3932 if (!xfs_iext_next_extent(ifp, &icur, &got))
3940 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3941 * global pool and the extent inserted into the inode in-core extent tree.
3943 * On entry, got refers to the first extent beyond the offset of the extent to
3944 * allocate or eof is specified if no such extent exists. On return, got refers
3945 * to the extent record that was inserted to the inode fork.
3947 * Note that the allocated extent may have been merged with contiguous extents
3948 * during insertion into the inode fork. Thus, got does not reflect the current
3949 * state of the inode fork on return. If necessary, the caller can use @icur to
3950 * look up the updated record in the inode fork.
3953 xfs_bmapi_reserve_delalloc(
3954 struct xfs_inode *ip,
3958 xfs_filblks_t prealloc,
3959 struct xfs_bmbt_irec *got,
3960 struct xfs_iext_cursor *icur,
3963 struct xfs_mount *mp = ip->i_mount;
3964 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3966 xfs_extlen_t indlen;
3968 xfs_fileoff_t aoff = off;
3971 * Cap the alloc length. Keep track of prealloc so we know whether to
3972 * tag the inode before we return.
3974 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3975 if (!eof)
3976 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3977 if (prealloc && alen >= len)
3978 prealloc = alen - len;
3980 /* Figure out the extent size, adjust alen */
3981 if (whichfork == XFS_COW_FORK) {
3982 struct xfs_bmbt_irec prev;
3983 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3985 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3986 prev.br_startoff = NULLFILEOFF;
3988 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3989 1, 0, &aoff, &alen);
3994 * Make a transaction-less quota reservation for delayed allocation
3995 * blocks. This number gets adjusted later. We return if we haven't
3996 * allocated blocks already inside this loop.
3998 error = xfs_quota_reserve_blkres(ip, alen);
4003 * Split changing sb for alen and indlen since they could be coming
4004 * from different places.
4006 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4009 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4011 goto out_unreserve_quota;
4013 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4015 goto out_unreserve_blocks;
4018 ip->i_delayed_blks += alen;
4019 xfs_mod_delalloc(ip->i_mount, alen + indlen);
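/*
 * Worked example (hypothetical numbers): a 64-block delalloc request
 * reserves alen = 64 data blocks plus a worst-case indlen of, say, 4
 * bmbt blocks from fdblocks; the indlen is then hidden inside
 * br_startblock via nullstartblock(indlen) below rather than mapped
 * anywhere on disk.
 */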
4021 got->br_startoff = aoff;
4022 got->br_startblock = nullstartblock(indlen);
4023 got->br_blockcount = alen;
4024 got->br_state = XFS_EXT_NORM;
4026 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
4029 * Tag the inode if blocks were preallocated. Note that COW fork
4030 * preallocation can occur at the start or end of the extent, even when
4031 * prealloc == 0, so we must also check the aligned offset and length.
4033 if (whichfork == XFS_DATA_FORK && prealloc)
4034 xfs_inode_set_eofblocks_tag(ip);
4035 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4036 xfs_inode_set_cowblocks_tag(ip);
4040 out_unreserve_blocks:
4041 xfs_mod_fdblocks(mp, alen, false);
4042 out_unreserve_quota:
4043 if (XFS_IS_QUOTA_ON(mp))
4044 xfs_quota_unreserve_blkres(ip, alen);
4049 xfs_bmap_alloc_userdata(
4050 struct xfs_bmalloca *bma)
4052 struct xfs_mount *mp = bma->ip->i_mount;
4053 int whichfork = xfs_bmapi_whichfork(bma->flags);
4057 * Set the data type being allocated. For the data fork, the first data
4058 * in the file is treated differently to all other allocations. For the
4059 * attribute fork, we only need to ensure the allocated range is not on
4060 * the busy list.
4062 bma->datatype = XFS_ALLOC_NOBUSY;
4063 if (whichfork == XFS_DATA_FORK) {
4064 bma->datatype |= XFS_ALLOC_USERDATA;
4065 if (bma->offset == 0)
4066 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4068 if (mp->m_dalign && bma->length >= mp->m_dalign) {
4069 error = xfs_bmap_isaeof(bma, whichfork);
4074 if (XFS_IS_REALTIME_INODE(bma->ip))
4075 return xfs_bmap_rtalloc(bma);
4078 if (unlikely(XFS_TEST_ERROR(false, mp,
4079 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4080 return xfs_bmap_exact_minlen_extent_alloc(bma);
4082 return xfs_bmap_btalloc(bma);
4087 struct xfs_bmalloca *bma)
4089 struct xfs_mount *mp = bma->ip->i_mount;
4090 int whichfork = xfs_bmapi_whichfork(bma->flags);
4091 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4092 int tmp_logflags = 0;
4095 ASSERT(bma->length > 0);
4098 * For the wasdelay case, we could also just allocate the stuff asked
4099 * for in this bmap call but that wouldn't be as good.
4102 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4103 bma->offset = bma->got.br_startoff;
4104 if (!xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev))
4105 bma->prev.br_startoff = NULLFILEOFF;
4107 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4108 if (!bma->eof)
4109 bma->length = XFS_FILBLKS_MIN(bma->length,
4110 bma->got.br_startoff - bma->offset);
4113 if (bma->flags & XFS_BMAPI_CONTIG)
4114 bma->minlen = bma->length;
4115 else
4116 bma->minlen = 1;
4118 if (bma->flags & XFS_BMAPI_METADATA) {
4119 if (unlikely(XFS_TEST_ERROR(false, mp,
4120 XFS_ERRTAG_BMAP_ALLOC_MINLEN_EXTENT)))
4121 error = xfs_bmap_exact_minlen_extent_alloc(bma);
4123 error = xfs_bmap_btalloc(bma);
4125 error = xfs_bmap_alloc_userdata(bma);
4127 if (error || bma->blkno == NULLFSBLOCK)
4130 if (bma->flags & XFS_BMAPI_ZERO) {
4131 error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
4136 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur)
4137 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4139 * Bump the number of extents we've allocated
4140 * in this call.
4142 bma->nallocs++;
4144 if (bma->cur)
4145 bma->cur->bc_ino.flags =
4146 bma->wasdel ? XFS_BTCUR_BMBT_WASDEL : 0;
4148 bma->got.br_startoff = bma->offset;
4149 bma->got.br_startblock = bma->blkno;
4150 bma->got.br_blockcount = bma->length;
4151 bma->got.br_state = XFS_EXT_NORM;
4153 if (bma->flags & XFS_BMAPI_PREALLOC)
4154 bma->got.br_state = XFS_EXT_UNWRITTEN;
4157 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4159 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4160 whichfork, &bma->icur, &bma->cur, &bma->got,
4161 &bma->logflags, bma->flags);
4163 bma->logflags |= tmp_logflags;
4168 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4169 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4170 * the neighbouring ones.
4172 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4174 ASSERT(bma->got.br_startoff <= bma->offset);
4175 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4176 bma->offset + bma->length);
4177 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4178 bma->got.br_state == XFS_EXT_UNWRITTEN);
4183 xfs_bmapi_convert_unwritten(
4184 struct xfs_bmalloca *bma,
4185 struct xfs_bmbt_irec *mval,
4189 int whichfork = xfs_bmapi_whichfork(flags);
4190 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4191 int tmp_logflags = 0;
4194 /* check if we need to do unwritten->real conversion */
4195 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4196 (flags & XFS_BMAPI_PREALLOC))
4199 /* check if we need to do real->unwritten conversion */
4200 if (mval->br_state == XFS_EXT_NORM &&
4201 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4202 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4206 * Modify (by adding) the state flag, if writing.
4208 ASSERT(mval->br_blockcount <= len);
4209 if (ifp->if_format == XFS_DINODE_FMT_BTREE && !bma->cur) {
4210 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4211 bma->ip, whichfork);
4213 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4214 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4217 * Before insertion into the bmbt, zero the range being converted
4218 * if required.
4220 if (flags & XFS_BMAPI_ZERO) {
4221 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4222 mval->br_blockcount);
4227 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4228 &bma->icur, &bma->cur, mval, &tmp_logflags);
4230 * Log the inode core unconditionally in the unwritten extent conversion
4231 * path because the conversion might not have done so (e.g., if the
4232 * extent count hasn't changed). We need to make sure the inode is dirty
4233 * in the transaction for the sake of fsync(), even if nothing has
4234 * changed, because fsync() will not force the log for this transaction
4235 * unless it sees the inode pinned.
4237 * Note: If we're only converting cow fork extents, there aren't
4238 * any on-disk updates to make, so we don't need to log anything.
4240 if (whichfork != XFS_COW_FORK)
4241 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4246 * Update our extent pointer, given that
4247 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4248 * of the neighbouring ones.
4250 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4253 * We may have combined previously unwritten space with written space,
4254 * so generate another request.
4256 if (mval->br_blockcount < len)
4261 static inline xfs_extlen_t
4263 struct xfs_trans *tp,
4264 struct xfs_inode *ip,
4267 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, fork);
4269 if (tp && tp->t_firstblock != NULLFSBLOCK)
4270 return 0;
4271 if (ifp->if_format != XFS_DINODE_FMT_BTREE)
4272 return 1;
4273 return be16_to_cpu(ifp->if_broot->bb_level) + 1;
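/*
 * Informal reading, not from the original source: a transaction that
 * already owns a block needs no minleft reserve; an extents-format
 * fork can grow at most a height-1 btree, hence the 1; and an existing
 * bmbt of level N may need up to N + 1 new blocks for a full-height
 * split, so a level-2 bmbt yields a minleft of 3.
 */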
4277 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4278 * a case where the data is changed, there's an error, and it's not logged so we
4279 * don't shutdown when we should. Don't bother logging extents/btree changes if
4280 * we converted to the other format.
4284 struct xfs_bmalloca *bma,
4288 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4290 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4291 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
4292 bma->logflags &= ~xfs_ilog_fext(whichfork);
4293 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4294 ifp->if_format != XFS_DINODE_FMT_BTREE)
4295 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4298 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4300 xfs_btree_del_cursor(bma->cur, error);
4304 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4305 * extent state if necessary. Detailed behaviour is controlled by the flags
4306 * parameter. Only allocates blocks from a single allocation group, to avoid
4307 * locking problems.
4311 struct xfs_trans *tp, /* transaction pointer */
4312 struct xfs_inode *ip, /* incore inode */
4313 xfs_fileoff_t bno, /* starting file offs. mapped */
4314 xfs_filblks_t len, /* length to map in file */
4315 int flags, /* XFS_BMAPI_... */
4316 xfs_extlen_t total, /* total blocks needed */
4317 struct xfs_bmbt_irec *mval, /* output: map values */
4318 int *nmap) /* i/o: mval size/count */
4320 struct xfs_bmalloca bma = {
4325 struct xfs_mount *mp = ip->i_mount;
4326 int whichfork = xfs_bmapi_whichfork(flags);
4327 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4328 xfs_fileoff_t end; /* end of mapped file region */
4329 bool eof = false; /* after the end of extents */
4330 int error; /* error return */
4331 int n; /* current extent index */
4332 xfs_fileoff_t obno; /* old block number (offset) */
4335 xfs_fileoff_t orig_bno; /* original block number value */
4336 int orig_flags; /* original flags arg value */
4337 xfs_filblks_t orig_len; /* original value of len arg */
4338 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4339 int orig_nmap; /* original value of *nmap */
4349 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4352 ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
4353 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4354 ASSERT(!(flags & XFS_BMAPI_REMAP));
4356 /* zeroing is currently only supported for data extents, not metadata */
4357 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4358 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4360 * we can allocate unwritten extents or pre-zero allocated blocks,
4361 * but it makes no sense to do both at once. This would result in
4362 * zeroing the unwritten extent twice, but it still being an
4363 * unwritten extent....
4365 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4366 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4368 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4369 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4370 return -EFSCORRUPTED;
4373 if (xfs_is_shutdown(mp))
4376 XFS_STATS_INC(mp, xs_blk_mapw);
4378 error = xfs_iread_extents(tp, ip, whichfork);
4382 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4384 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4385 bma.prev.br_startoff = NULLFILEOFF;
4386 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4391 while (bno < end && n < *nmap) {
4392 bool need_alloc = false, wasdelay = false;
4394 /* in hole or beyond EOF? */
4395 if (eof || bma.got.br_startoff > bno) {
4397 * CoW fork conversions should /never/ hit EOF or
4398 * holes. There should always be something for us
4399 * to work on.
4401 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4402 (flags & XFS_BMAPI_COWFORK)));
4405 } else if (isnullstartblock(bma.got.br_startblock)) {
4410 * First, deal with the hole before the allocated space
4411 * that we found, if any.
4413 if (need_alloc || wasdelay) {
4415 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4416 bma.wasdel = wasdelay;
4421 * There's a 32/64 bit type mismatch between the
4422 * allocation length request (which can be 64 bits in
4423 * length) and the bma length request, which is
4424 * xfs_extlen_t and therefore 32 bits. Hence we have to
4425 * check for 32-bit overflows and handle them here.
4427 if (len > (xfs_filblks_t)MAXEXTLEN)
4428 bma.length = MAXEXTLEN;
4429 else
4430 bma.length = len;
4433 ASSERT(bma.length > 0);
4434 error = xfs_bmapi_allocate(&bma);
4437 if (bma.blkno == NULLFSBLOCK)
4441 * If this is a CoW allocation, record the data in
4442 * the refcount btree for orphan recovery.
4444 if (whichfork == XFS_COW_FORK)
4445 xfs_refcount_alloc_cow_extent(tp, bma.blkno,
4449 /* Deal with the allocated space we found. */
4450 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4453 /* Execute unwritten extent conversion if necessary */
4454 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4455 if (error == -EAGAIN)
4460 /* update the extent map to return */
4461 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4464 * If we're done, stop now. Stop when we've allocated
4465 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4466 * the transaction may get too big.
4468 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4471 /* Else go on to the next record. */
4473 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4478 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4483 ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
4484 ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
4485 xfs_bmapi_finish(&bma, whichfork, 0);
4486 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4490 xfs_bmapi_finish(&bma, whichfork, error);
4495 * Convert an existing delalloc extent to real blocks based on file offset. This
4496 * attempts to allocate the entire delalloc extent and may require multiple
4497 * invocations to allocate the target offset if a large enough physical extent
4498 * is not available.
4501 xfs_bmapi_convert_delalloc(
4502 struct xfs_inode *ip,
4505 struct iomap *iomap,
4508 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4509 struct xfs_mount *mp = ip->i_mount;
4510 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
4511 struct xfs_bmalloca bma = { NULL };
4513 struct xfs_trans *tp;
4516 if (whichfork == XFS_COW_FORK)
4517 flags |= IOMAP_F_SHARED;
4520 * Space for the extent and indirect blocks was reserved when the
4521 * delalloc extent was created so there's no need to do so here.
4523 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4524 XFS_TRANS_RESERVE, &tp);
4528 xfs_ilock(ip, XFS_ILOCK_EXCL);
4530 error = xfs_iext_count_may_overflow(ip, whichfork,
4531 XFS_IEXT_ADD_NOSPLIT_CNT);
4533 goto out_trans_cancel;
4535 xfs_trans_ijoin(tp, ip, 0);
4537 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4538 bma.got.br_startoff > offset_fsb) {
4540 * No extent found in the range we are trying to convert. This
4541 * should only happen for the COW fork, where another thread
4542 * might have moved the extent to the data fork in the meantime.
4544 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
4546 goto out_trans_cancel;
4550 * If we find a real extent here we raced with another thread converting
4551 * the extent. Just return the real extent at this offset.
4553 if (!isnullstartblock(bma.got.br_startblock)) {
4554 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
4555 *seq = READ_ONCE(ifp->if_seq);
4556 goto out_trans_cancel;
4562 bma.offset = bma.got.br_startoff;
4563 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
4564 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4567 * When we're converting the delalloc reservations backing dirty pages
4568 * in the page cache, we must be careful about how we create the new
4569 * extents:
4571 * New CoW fork extents are created unwritten, turned into real extents
4572 * when we're about to write the data to disk, and mapped into the data
4573 * fork after the write finishes. End of story.
4575 * New data fork extents must be mapped in as unwritten and converted
4576 * to real extents after the write succeeds to avoid exposing stale
4577 * disk contents if we crash.
4579 bma.flags = XFS_BMAPI_PREALLOC;
4580 if (whichfork == XFS_COW_FORK)
4581 bma.flags |= XFS_BMAPI_COWFORK;
4583 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4584 bma.prev.br_startoff = NULLFILEOFF;
4586 error = xfs_bmapi_allocate(&bma);
4587 if (error)
4588 goto out_finish;
4590 error = -ENOSPC;
4591 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
4592 goto out_finish;
4593 error = -EFSCORRUPTED;
4594 if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
4595 goto out_finish;
4597 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4598 XFS_STATS_INC(mp, xs_xstrat_quick);
4600 ASSERT(!isnullstartblock(bma.got.br_startblock));
4601 xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
4602 *seq = READ_ONCE(ifp->if_seq);
4604 if (whichfork == XFS_COW_FORK)
4605 xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
4607 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
4608 whichfork);
4609 if (error)
4610 goto out_finish;
4612 xfs_bmapi_finish(&bma, whichfork, 0);
4613 error = xfs_trans_commit(tp);
4614 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4615 return error;
4617 out_finish:
4618 xfs_bmapi_finish(&bma, whichfork, error);
4619 out_trans_cancel:
4620 xfs_trans_cancel(tp);
4621 xfs_iunlock(ip, XFS_ILOCK_EXCL);
4622 return error;
4623 }
4625 int
4626 xfs_bmapi_remap(
4627 struct xfs_trans *tp,
4628 struct xfs_inode *ip,
4629 xfs_fileoff_t bno,
4630 xfs_filblks_t len,
4631 xfs_fsblock_t startblock,
4632 int flags)
4633 {
4634 struct xfs_mount *mp = ip->i_mount;
4635 struct xfs_ifork *ifp;
4636 struct xfs_btree_cur *cur = NULL;
4637 struct xfs_bmbt_irec got;
4638 struct xfs_iext_cursor icur;
4639 int whichfork = xfs_bmapi_whichfork(flags);
4640 int logflags = 0, error;
4642 ifp = XFS_IFORK_PTR(ip, whichfork);
4644 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4645 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4646 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4647 XFS_BMAPI_NORMAP)));
4648 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4649 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4651 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
4652 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
4653 return -EFSCORRUPTED;
4654 }
4656 if (xfs_is_shutdown(mp))
4657 return -EIO;
4659 error = xfs_iread_extents(tp, ip, whichfork);
4660 if (error)
4661 return error;
4663 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4664 /* make sure we only reflink into a hole. */
4665 ASSERT(got.br_startoff > bno);
4666 ASSERT(got.br_startoff - bno >= len);
4667 }
4669 ip->i_nblocks += len;
4670 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4672 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
4673 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4674 cur->bc_ino.flags = 0;
4675 }
4677 got.br_startoff = bno;
4678 got.br_startblock = startblock;
4679 got.br_blockcount = len;
4680 if (flags & XFS_BMAPI_PREALLOC)
4681 got.br_state = XFS_EXT_UNWRITTEN;
4682 else
4683 got.br_state = XFS_EXT_NORM;
4685 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4686 &cur, &got, &logflags, flags);
4687 if (error)
4688 goto error0;
4690 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
4692 error0:
4693 if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
4694 logflags &= ~XFS_ILOG_DEXT;
4695 else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
4696 logflags &= ~XFS_ILOG_DBROOT;
4698 if (logflags)
4699 xfs_trans_log_inode(tp, ip, logflags);
4700 if (cur)
4701 xfs_btree_del_cursor(cur, error);
4702 return error;
4703 }
4706 * When a delalloc extent is split (e.g., due to a hole punch), the original
4707 * indlen reservation must be shared across the two new extents that are left
4710 * Given the original reservation and the worst case indlen for the two new
4711 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4712 * reservation fairly across the two new extents. If necessary, steal available
4713 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4714 * ores == 1). The number of stolen blocks is returned. The availability and
4715 * subsequent accounting of stolen blocks is the responsibility of the caller.
4716 */
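/*
 * Worked example (editorial, not from the original source): with
 * ores = 3, *indlen1 = *indlen2 = 2 and avail = 0, nres = 4 exceeds
 * ores and nothing can be stolen. resfactor = 300 / 4 = 75, so each
 * indlen is first scaled down to 2 * 75 / 100 = 1; the one remaining
 * block (ores - 2) is then handed out round-robin, giving the final
 * split *indlen1 = 2, *indlen2 = 1 with stolen = 0.
 */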
4717 static xfs_filblks_t
4718 xfs_bmap_split_indlen(
4719 xfs_filblks_t ores, /* original res. */
4720 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4721 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4722 xfs_filblks_t avail) /* stealable blocks */
4724 xfs_filblks_t len1 = *indlen1;
4725 xfs_filblks_t len2 = *indlen2;
4726 xfs_filblks_t nres = len1 + len2; /* new total res. */
4727 xfs_filblks_t stolen = 0;
4728 xfs_filblks_t resfactor;
4731 * Steal as many blocks as we can to try and satisfy the worst case
4732 * indlen for both new extents.
4734 if (ores < nres && avail)
4735 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4736 ores += stolen;
4738 /* nothing else to do if we've satisfied the new reservation */
4739 if (ores >= nres)
4740 return stolen;
4743 * We can't meet the total required reservation for the two extents.
4744 * Calculate the percent of the overall shortage between both extents
4745 * and apply this percentage to each of the requested indlen values.
4746 * This distributes the shortage fairly and reduces the chances that one
4747 * of the two extents is left with nothing when extents are repeatedly
4748 * split.
4749 */
4750 resfactor = (ores * 100);
4751 do_div(resfactor, nres);
4752 len1 *= resfactor;
4753 do_div(len1, 100);
4754 len2 *= resfactor;
4755 do_div(len2, 100);
4756 ASSERT(len1 + len2 <= ores);
4757 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4760 * Hand out the remainder to each extent. If one of the two reservations
4761 * is zero, we want to make sure that one gets a block first. The loop
4762 * below starts with len1, so hand len2 a block right off the bat if it
4763 * is zero.
4764 */
4765 ores -= (len1 + len2);
4766 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4767 if (ores && !len2 && *indlen2) {
4768 len2++;
4769 ores--;
4770 }
4771 while (ores) {
4772 if (len1 < *indlen1) {
4773 len1++;
4774 ores--;
4775 }
4776 if (!ores)
4777 break;
4778 if (len2 < *indlen2) {
4779 len2++;
4780 ores--;
4781 }
4782 }
4784 *indlen1 = len1;
4785 *indlen2 = len2;
4787 return stolen;
4788 }
4790 int
4791 xfs_bmap_del_extent_delay(
4792 struct xfs_inode *ip,
4793 int whichfork,
4794 struct xfs_iext_cursor *icur,
4795 struct xfs_bmbt_irec *got,
4796 struct xfs_bmbt_irec *del)
4797 {
4798 struct xfs_mount *mp = ip->i_mount;
4799 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4800 struct xfs_bmbt_irec new;
4801 int64_t da_old, da_new, da_diff = 0;
4802 xfs_fileoff_t del_endoff, got_endoff;
4803 xfs_filblks_t got_indlen, new_indlen, stolen;
4804 int state = xfs_bmap_fork_to_state(whichfork);
4805 int error = 0;
4806 bool isrt;
4808 XFS_STATS_INC(mp, xs_del_exlist);
4810 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4811 del_endoff = del->br_startoff + del->br_blockcount;
4812 got_endoff = got->br_startoff + got->br_blockcount;
4813 da_old = startblockval(got->br_startblock);
4816 ASSERT(del->br_blockcount > 0);
4817 ASSERT(got->br_startoff <= del->br_startoff);
4818 ASSERT(got_endoff >= del_endoff);
4820 if (isrt) {
4821 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4823 do_div(rtexts, mp->m_sb.sb_rextsize);
4824 xfs_mod_frextents(mp, rtexts);
4825 }
4828 * Update the inode delalloc counter now and wait to update the
4829 * sb counters as we might have to borrow some blocks for the
4830 * indirect block accounting.
4833 error = xfs_quota_unreserve_blkres(ip, del->br_blockcount);
4834 if (error)
4835 return error;
4836 ip->i_delayed_blks -= del->br_blockcount;
4838 if (got->br_startoff == del->br_startoff)
4839 state |= BMAP_LEFT_FILLING;
4840 if (got_endoff == del_endoff)
4841 state |= BMAP_RIGHT_FILLING;
4843 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4844 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4846 * Matches the whole extent. Delete the entry.
4848 xfs_iext_remove(ip, icur, state);
4849 xfs_iext_prev(ifp, icur);
4850 break;
4851 case BMAP_LEFT_FILLING:
4853 * Deleting the first part of the extent.
4855 got->br_startoff = del_endoff;
4856 got->br_blockcount -= del->br_blockcount;
4857 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4858 got->br_blockcount), da_old);
4859 got->br_startblock = nullstartblock((int)da_new);
4860 xfs_iext_update_extent(ip, state, icur, got);
4861 break;
4862 case BMAP_RIGHT_FILLING:
4864 * Deleting the last part of the extent.
4866 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4867 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4868 got->br_blockcount), da_old);
4869 got->br_startblock = nullstartblock((int)da_new);
4870 xfs_iext_update_extent(ip, state, icur, got);
4871 break;
4872 default:
4873 /*
4874 * Deleting the middle of the extent.
4875 *
4876 * Distribute the original indlen reservation across the two new
4877 * extents. Steal blocks from the deleted extent if necessary.
4878 * Stealing blocks simply fudges the fdblocks accounting below.
4879 * Warn if either of the new indlen reservations is zero as this
4880 * can lead to delalloc problems.
4881 */
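/*
 * Worked example (editorial): if da_old = 2 and the two new worst-case
 * indlens are 4 + 4 = 8, xfs_bmap_split_indlen() may steal up to
 * 8 - 2 = 6 blocks from del->br_blockcount. With 6 stolen, both pieces
 * keep their full reservations and da_new = 4 + 4 - 6 = 2; the stolen
 * blocks are accounted by trimming del->br_blockcount below, so they
 * are never returned to fdblocks.
 */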
4882 got->br_blockcount = del->br_startoff - got->br_startoff;
4883 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4885 new.br_blockcount = got_endoff - del_endoff;
4886 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4888 WARN_ON_ONCE(!got_indlen || !new_indlen);
4889 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4890 del->br_blockcount);
4892 got->br_startblock = nullstartblock((int)got_indlen);
4894 new.br_startoff = del_endoff;
4895 new.br_state = got->br_state;
4896 new.br_startblock = nullstartblock((int)new_indlen);
4898 xfs_iext_update_extent(ip, state, icur, got);
4899 xfs_iext_next(ifp, icur);
4900 xfs_iext_insert(ip, icur, &new, state);
4902 da_new = got_indlen + new_indlen - stolen;
4903 del->br_blockcount -= stolen;
4904 break;
4905 }
4907 ASSERT(da_old >= da_new);
4908 da_diff = da_old - da_new;
4909 if (!isrt)
4910 da_diff += del->br_blockcount;
4911 if (da_diff) {
4912 xfs_mod_fdblocks(mp, da_diff, false);
4913 xfs_mod_delalloc(mp, -da_diff);
4914 }
4915 return error;
4916 }
4918 void
4919 xfs_bmap_del_extent_cow(
4920 struct xfs_inode *ip,
4921 struct xfs_iext_cursor *icur,
4922 struct xfs_bmbt_irec *got,
4923 struct xfs_bmbt_irec *del)
4925 struct xfs_mount *mp = ip->i_mount;
4926 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4927 struct xfs_bmbt_irec new;
4928 xfs_fileoff_t del_endoff, got_endoff;
4929 int state = BMAP_COWFORK;
4931 XFS_STATS_INC(mp, xs_del_exlist);
4933 del_endoff = del->br_startoff + del->br_blockcount;
4934 got_endoff = got->br_startoff + got->br_blockcount;
4936 ASSERT(del->br_blockcount > 0);
4937 ASSERT(got->br_startoff <= del->br_startoff);
4938 ASSERT(got_endoff >= del_endoff);
4939 ASSERT(!isnullstartblock(got->br_startblock));
4941 if (got->br_startoff == del->br_startoff)
4942 state |= BMAP_LEFT_FILLING;
4943 if (got_endoff == del_endoff)
4944 state |= BMAP_RIGHT_FILLING;
4946 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4947 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4949 * Matches the whole extent. Delete the entry.
4951 xfs_iext_remove(ip, icur, state);
4952 xfs_iext_prev(ifp, icur);
4953 break;
4954 case BMAP_LEFT_FILLING:
4956 * Deleting the first part of the extent.
4958 got->br_startoff = del_endoff;
4959 got->br_blockcount -= del->br_blockcount;
4960 got->br_startblock = del->br_startblock + del->br_blockcount;
4961 xfs_iext_update_extent(ip, state, icur, got);
4962 break;
4963 case BMAP_RIGHT_FILLING:
4965 * Deleting the last part of the extent.
4967 got->br_blockcount -= del->br_blockcount;
4968 xfs_iext_update_extent(ip, state, icur, got);
4969 break;
4970 default:
4971 /*
4972 * Deleting the middle of the extent.
4973 */
4974 got->br_blockcount = del->br_startoff - got->br_startoff;
4976 new.br_startoff = del_endoff;
4977 new.br_blockcount = got_endoff - del_endoff;
4978 new.br_state = got->br_state;
4979 new.br_startblock = del->br_startblock + del->br_blockcount;
4981 xfs_iext_update_extent(ip, state, icur, got);
4982 xfs_iext_next(ifp, icur);
4983 xfs_iext_insert(ip, icur, &new, state);
4984 break;
4985 }
4986 ip->i_delayed_blks -= del->br_blockcount;
4987 }
4990 * Called by xfs_bmapi to update file extent records and the btree
4991 * after removing space.
4993 STATIC int /* error */
4994 xfs_bmap_del_extent_real(
4995 xfs_inode_t *ip, /* incore inode pointer */
4996 xfs_trans_t *tp, /* current transaction pointer */
4997 struct xfs_iext_cursor *icur,
4998 struct xfs_btree_cur *cur, /* if null, not a btree */
4999 xfs_bmbt_irec_t *del, /* data to remove from extents */
5000 int *logflagsp, /* inode logging flags */
5001 int whichfork, /* data or attr fork */
5002 int bflags) /* bmapi flags */
5004 xfs_fsblock_t del_endblock=0; /* first block past del */
5005 xfs_fileoff_t del_endoff; /* first offset past del */
5006 int do_fx; /* free extent at end of routine */
5007 int error; /* error return value */
5008 int flags = 0;/* inode logging flags */
5009 struct xfs_bmbt_irec got; /* current extent entry */
5010 xfs_fileoff_t got_endoff; /* first offset past got */
5011 int i; /* temp state */
5012 struct xfs_ifork *ifp; /* inode fork pointer */
5013 xfs_mount_t *mp; /* mount structure */
5014 xfs_filblks_t nblks; /* quota/sb block count */
5015 xfs_bmbt_irec_t new; /* new record to be inserted */
5017 uint qfield; /* quota field to update */
5018 int state = xfs_bmap_fork_to_state(whichfork);
5019 struct xfs_bmbt_irec old;
5021 mp = ip->i_mount;
5022 XFS_STATS_INC(mp, xs_del_exlist);
5024 ifp = XFS_IFORK_PTR(ip, whichfork);
5025 ASSERT(del->br_blockcount > 0);
5026 xfs_iext_get_extent(ifp, icur, &got);
5027 ASSERT(got.br_startoff <= del->br_startoff);
5028 del_endoff = del->br_startoff + del->br_blockcount;
5029 got_endoff = got.br_startoff + got.br_blockcount;
5030 ASSERT(got_endoff >= del_endoff);
5031 ASSERT(!isnullstartblock(got.br_startblock));
5032 qfield = 0;
5033 error = 0;
5036 * If it's the case where the directory code is running with no block
5037 * reservation, and the deleted block is in the middle of its extent,
5038 * and the resulting insert of an extent would cause transformation to
5039 * btree format, then reject it. The calling code will then swap blocks
5040 * around instead. We have to do this now, rather than waiting for the
5041 * conversion to btree format, since the transaction will be dirty then.
5043 if (tp->t_blk_res == 0 &&
5044 ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
5045 ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
5046 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
5047 return -ENOSPC;
5049 flags = XFS_ILOG_CORE;
5050 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5051 xfs_filblks_t len;
5052 xfs_extlen_t mod;
5054 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
5055 &mod);
5056 ASSERT(mod == 0);
5058 if (!(bflags & XFS_BMAPI_REMAP)) {
5059 xfs_fsblock_t bno;
5061 bno = div_u64_rem(del->br_startblock,
5062 mp->m_sb.sb_rextsize, &mod);
5063 ASSERT(mod == 0);
5065 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5066 if (error)
5067 goto done;
5068 }
5070 do_fx = 0;
5071 nblks = len * mp->m_sb.sb_rextsize;
5072 qfield = XFS_TRANS_DQ_RTBCOUNT;
5073 } else {
5074 do_fx = 1;
5075 nblks = del->br_blockcount;
5076 qfield = XFS_TRANS_DQ_BCOUNT;
5077 }
5079 del_endblock = del->br_startblock + del->br_blockcount;
5080 if (cur) {
5081 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5082 if (error)
5083 goto done;
5084 if (XFS_IS_CORRUPT(mp, i != 1)) {
5085 error = -EFSCORRUPTED;
5086 goto done;
5087 }
5088 }
5090 if (got.br_startoff == del->br_startoff)
5091 state |= BMAP_LEFT_FILLING;
5092 if (got_endoff == del_endoff)
5093 state |= BMAP_RIGHT_FILLING;
5095 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5096 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5098 * Matches the whole extent. Delete the entry.
5100 xfs_iext_remove(ip, icur, state);
5101 xfs_iext_prev(ifp, icur);
5102 ifp->if_nextents--;
5104 flags |= XFS_ILOG_CORE;
5105 if (!cur) {
5106 flags |= xfs_ilog_fext(whichfork);
5107 break;
5108 }
5109 if ((error = xfs_btree_delete(cur, &i)))
5110 goto done;
5111 if (XFS_IS_CORRUPT(mp, i != 1)) {
5112 error = -EFSCORRUPTED;
5113 goto done;
5114 }
5115 break;
5116 case BMAP_LEFT_FILLING:
5118 * Deleting the first part of the extent.
5120 got.br_startoff = del_endoff;
5121 got.br_startblock = del_endblock;
5122 got.br_blockcount -= del->br_blockcount;
5123 xfs_iext_update_extent(ip, state, icur, &got);
5124 if (!cur) {
5125 flags |= xfs_ilog_fext(whichfork);
5126 break;
5127 }
5128 error = xfs_bmbt_update(cur, &got);
5129 if (error)
5130 goto done;
5131 break;
5132 case BMAP_RIGHT_FILLING:
5134 * Deleting the last part of the extent.
5136 got.br_blockcount -= del->br_blockcount;
5137 xfs_iext_update_extent(ip, state, icur, &got);
5138 if (!cur) {
5139 flags |= xfs_ilog_fext(whichfork);
5140 break;
5141 }
5142 error = xfs_bmbt_update(cur, &got);
5143 if (error)
5144 goto done;
5145 break;
5146 default:
5148 * Deleting the middle of the extent.
5152 * For directories, -ENOSPC is returned since a directory entry
5153 * remove operation must not fail due to low extent count
5154 * availability. -ENOSPC will be handled by higher layers of XFS
5155 * by letting the corresponding empty Data/Free blocks linger
5156 * until a future remove operation. Dabtree blocks would be
5157 * swapped with the last block in the leaf space and then the
5158 * new last block will be unmapped.
5160 * The above logic also applies to the source directory entry of
5161 * a rename operation.
5162 */
5163 error = xfs_iext_count_may_overflow(ip, whichfork, 1);
5164 if (error) {
5165 ASSERT(S_ISDIR(VFS_I(ip)->i_mode) &&
5166 whichfork == XFS_DATA_FORK);
5167 error = -ENOSPC;
5168 goto done;
5169 }
5171 old = got;
5173 got.br_blockcount = del->br_startoff - got.br_startoff;
5174 xfs_iext_update_extent(ip, state, icur, &got);
5176 new.br_startoff = del_endoff;
5177 new.br_blockcount = got_endoff - del_endoff;
5178 new.br_state = got.br_state;
5179 new.br_startblock = del_endblock;
5181 flags |= XFS_ILOG_CORE;
5182 if (cur) {
5183 error = xfs_bmbt_update(cur, &got);
5184 if (error)
5185 goto done;
5186 error = xfs_btree_increment(cur, 0, &i);
5187 if (error)
5188 goto done;
5189 cur->bc_rec.b = new;
5190 error = xfs_btree_insert(cur, &i);
5191 if (error && error != -ENOSPC)
5192 goto done;
5194 * If we get -ENOSPC back from the btree insert, it tried a
5195 * split, and we have a zero block reservation. Fix up
5196 * our state and return the error.
5197 */
5198 if (error == -ENOSPC) {
5200 * Reset the cursor, don't trust it after any
5201 * insert operation.
5202 */
5203 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5204 if (error)
5205 goto done;
5206 if (XFS_IS_CORRUPT(mp, i != 1)) {
5207 error = -EFSCORRUPTED;
5208 goto done;
5209 }
5211 * Update the btree record back
5212 * to the original value.
5214 error = xfs_bmbt_update(cur, &old);
5215 if (error)
5216 goto done;
5218 * Reset the extent record back
5219 * to the original value.
5221 xfs_iext_update_extent(ip, state, icur, &old);
5222 flags = 0;
5223 error = -ENOSPC;
5224 goto done;
5225 }
5226 if (XFS_IS_CORRUPT(mp, i != 1)) {
5227 error = -EFSCORRUPTED;
5228 goto done;
5229 }
5230 } else
5231 flags |= xfs_ilog_fext(whichfork);
5233 ifp->if_nextents++;
5234 xfs_iext_next(ifp, icur);
5235 xfs_iext_insert(ip, icur, &new, state);
5236 break;
5237 }
5239 /* remove reverse mapping */
5240 xfs_rmap_unmap_extent(tp, ip, whichfork, del);
5243 * If we need to, add to list of extents to delete.
5245 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5246 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5247 xfs_refcount_decrease_extent(tp, del);
5248 } else {
5249 __xfs_free_extent_later(tp, del->br_startblock,
5250 del->br_blockcount, NULL,
5251 (bflags & XFS_BMAPI_NODISCARD) ||
5252 del->br_state == XFS_EXT_UNWRITTEN);
5253 }
5254 }
5256 /*
5257 * Adjust inode # blocks in the file.
5258 */
5259 if (nblks)
5260 ip->i_nblocks -= nblks;
5262 * Adjust quota data.
5264 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5265 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5267 done:
5268 *logflagsp = flags;
5269 return error;
5270 }
5273 * Unmap (remove) blocks from a file.
5274 * If nexts is nonzero then the number of extents to remove is limited to
5275 * that value. If not all extents in the block range can be removed then
5276 * *rlen is set to the remaining range.
5277 */
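/*
 * Illustrative caller pattern (editorial; modeled on the in-tree truncate
 * path, not part of this file). Callers generally loop, rolling the
 * transaction while *rlen reports blocks still mapped:
 *
 *	xfs_filblks_t	unmap_len = last_block - first_block + 1;
 *
 *	while (unmap_len > 0) {
 *		error = __xfs_bunmapi(tp, ip, first_block, &unmap_len,
 *				flags, XFS_ITRUNC_MAX_EXTENTS);
 *		if (error)
 *			break;
 *		(commit and roll tp before the next pass)
 *	}
 */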
5278 int /* error */
5279 __xfs_bunmapi(
5280 struct xfs_trans *tp, /* transaction pointer */
5281 struct xfs_inode *ip, /* incore inode */
5282 xfs_fileoff_t start, /* first file offset deleted */
5283 xfs_filblks_t *rlen, /* i/o: amount remaining */
5284 int flags, /* misc flags */
5285 xfs_extnum_t nexts) /* number of extents max */
5287 struct xfs_btree_cur *cur; /* bmap btree cursor */
5288 struct xfs_bmbt_irec del; /* extent being deleted */
5289 int error; /* error return value */
5290 xfs_extnum_t extno; /* extent number in list */
5291 struct xfs_bmbt_irec got; /* current extent record */
5292 struct xfs_ifork *ifp; /* inode fork pointer */
5293 int isrt; /* freeing in rt area */
5294 int logflags; /* transaction logging flags */
5295 xfs_extlen_t mod; /* rt extent offset */
5296 struct xfs_mount *mp = ip->i_mount;
5297 int tmp_logflags; /* partial logging flags */
5298 int wasdel; /* was a delayed alloc extent */
5299 int whichfork; /* data or attribute fork */
5300 xfs_fsblock_t sum;
5301 xfs_filblks_t len = *rlen; /* length to unmap in file */
5302 xfs_fileoff_t max_len;
5303 xfs_fileoff_t end;
5304 struct xfs_iext_cursor icur;
5305 bool done = false;
5307 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5309 whichfork = xfs_bmapi_whichfork(flags);
5310 ASSERT(whichfork != XFS_COW_FORK);
5311 ifp = XFS_IFORK_PTR(ip, whichfork);
5312 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
5313 return -EFSCORRUPTED;
5314 if (xfs_is_shutdown(mp))
5315 return -EIO;
5317 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5322 * Guesstimate how many blocks we can unmap without running the risk of
5323 * blowing out the transaction with a mix of EFIs and reflink
5326 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5327 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5328 else
5329 max_len = len;
5331 error = xfs_iread_extents(tp, ip, whichfork);
5332 if (error)
5333 return error;
5335 if (xfs_iext_count(ifp) == 0) {
5336 *rlen = 0;
5337 return 0;
5338 }
5339 XFS_STATS_INC(mp, xs_blk_unmap);
5340 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5341 end = start + len;
5343 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5344 *rlen = 0;
5345 return 0;
5346 }
5347 end--;
5349 logflags = 0;
5350 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5351 ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
5352 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5353 cur->bc_ino.flags = 0;
5354 } else
5355 cur = NULL;
5357 if (isrt) {
5358 /*
5359 * Synchronize by locking the bitmap inode.
5360 */
5361 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5362 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5363 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5364 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5365 }
5367 extno = 0;
5368 while (end != (xfs_fileoff_t)-1 && end >= start &&
5369 (nexts == 0 || extno < nexts) && max_len > 0) {
5371 * Is the found extent after a hole in which end lives?
5372 * Just back up to the previous extent, if so.
5374 if (got.br_startoff > end &&
5375 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5376 done = true;
5377 break;
5378 }
5380 * Is the last block of this extent before the range
5381 * we're supposed to delete? If so, we're done.
5383 end = XFS_FILEOFF_MIN(end,
5384 got.br_startoff + got.br_blockcount - 1);
5385 if (end < start)
5386 break;
5388 * Then deal with the (possibly delayed) allocated space
5389 * we found.
5390 */
5391 del = got;
5392 wasdel = isnullstartblock(del.br_startblock);
5394 if (got.br_startoff < start) {
5395 del.br_startoff = start;
5396 del.br_blockcount -= start - got.br_startoff;
5397 if (!wasdel)
5398 del.br_startblock += start - got.br_startoff;
5399 }
5400 if (del.br_startoff + del.br_blockcount > end + 1)
5401 del.br_blockcount = end + 1 - del.br_startoff;
5403 /* How much can we safely unmap? */
5404 if (max_len < del.br_blockcount) {
5405 del.br_startoff += del.br_blockcount - max_len;
5406 if (!wasdel)
5407 del.br_startblock += del.br_blockcount - max_len;
5408 del.br_blockcount = max_len;
5409 }
5411 if (!isrt)
5412 goto delete;
5414 sum = del.br_startblock + del.br_blockcount;
5415 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
5416 if (mod) {
5417 /*
5418 * Realtime extent not lined up at the end.
5419 * The extent could have been split into written
5420 * and unwritten pieces, or we could just be
5421 * unmapping part of it. But we can't really
5422 * get rid of part of a realtime extent.
5423 */
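/*
 * Editorial example: with sb_rextsize = 4, an unmap whose mapping ends at
 * startblock + blockcount = 1030 gives mod = 2, i.e. it ends two blocks
 * into a realtime extent, so the partial tail below is either skipped or
 * converted to unwritten instead of being freed.
 */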
5424 if (del.br_state == XFS_EXT_UNWRITTEN) {
5426 * This piece is unwritten, or we're not
5427 * using unwritten extents. Skip over it.
5428 */
5429 ASSERT(end >= mod);
5430 end -= mod > del.br_blockcount ?
5431 del.br_blockcount : mod;
5432 if (end < got.br_startoff &&
5433 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5434 done = true;
5435 break;
5436 }
5437 continue;
5438 }
5440 * It's written, turn it unwritten.
5441 * This is better than zeroing it.
5443 ASSERT(del.br_state == XFS_EXT_NORM);
5444 ASSERT(tp->t_blk_res > 0);
5446 * If this spans a realtime extent boundary,
5447 * chop it back to the start of the one we end at.
5449 if (del.br_blockcount > mod) {
5450 del.br_startoff += del.br_blockcount - mod;
5451 del.br_startblock += del.br_blockcount - mod;
5452 del.br_blockcount = mod;
5454 del.br_state = XFS_EXT_UNWRITTEN;
5455 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5456 whichfork, &icur, &cur, &del,
5457 &logflags);
5458 if (error)
5459 goto error0;
5460 goto nodelete;
5461 }
5462 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
5463 if (mod) {
5464 xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
5467 * Realtime extent is lined up at the end but not
5468 * at the front. We'll get rid of full extents if
5469 * we can.
5470 */
5471 if (del.br_blockcount > off) {
5472 del.br_blockcount -= off;
5473 del.br_startoff += off;
5474 del.br_startblock += off;
5475 } else if (del.br_startoff == start &&
5476 (del.br_state == XFS_EXT_UNWRITTEN ||
5477 tp->t_blk_res == 0)) {
5479 * Can't make it unwritten. There isn't
5480 * a full extent here so just skip it.
5482 ASSERT(end >= del.br_blockcount);
5483 end -= del.br_blockcount;
5484 if (got.br_startoff > end &&
5485 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5486 done = true;
5487 break;
5488 }
5489 continue;
5490 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5491 struct xfs_bmbt_irec prev;
5492 xfs_fileoff_t unwrite_start;
5495 * This one is already unwritten.
5496 * It must have a written left neighbor.
5497 * Unwrite the killed part of that one and
5498 * try again.
5499 */
5500 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5501 ASSERT(0);
5502 ASSERT(prev.br_state == XFS_EXT_NORM);
5503 ASSERT(!isnullstartblock(prev.br_startblock));
5504 ASSERT(del.br_startblock ==
5505 prev.br_startblock + prev.br_blockcount);
5506 unwrite_start = max3(start,
5507 del.br_startoff - mod,
5508 prev.br_startoff);
5509 mod = unwrite_start - prev.br_startoff;
5510 prev.br_startoff = unwrite_start;
5511 prev.br_startblock += mod;
5512 prev.br_blockcount -= mod;
5513 prev.br_state = XFS_EXT_UNWRITTEN;
5514 error = xfs_bmap_add_extent_unwritten_real(tp,
5515 ip, whichfork, &icur, &cur,
5516 &prev, &logflags);
5517 if (error)
5518 goto error0;
5519 goto nodelete;
5520 } else {
5521 ASSERT(del.br_state == XFS_EXT_NORM);
5522 del.br_state = XFS_EXT_UNWRITTEN;
5523 error = xfs_bmap_add_extent_unwritten_real(tp,
5524 ip, whichfork, &icur, &cur,
5525 &del, &logflags);
5526 if (error)
5527 goto error0;
5528 goto nodelete;
5529 }
5530 }
5532 delete:
5533 if (wasdel) {
5534 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5535 &got, &del);
5536 } else {
5537 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5538 &del, &tmp_logflags, whichfork,
5539 flags);
5540 logflags |= tmp_logflags;
5541 }
5543 if (error)
5544 goto error0;
5546 max_len -= del.br_blockcount;
5547 end = del.br_startoff - 1;
5548 nodelete:
5550 * If not done go on to the next (previous) record.
5552 if (end != (xfs_fileoff_t)-1 && end >= start) {
5553 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5554 (got.br_startoff > end &&
5555 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5556 done = true;
5557 break;
5558 }
5559 extno++;
5560 }
5561 }
5562 if (done || end == (xfs_fileoff_t)-1 || end < start)
5563 *rlen = 0;
5564 else
5565 *rlen = end - start + 1;
5568 * Convert to a btree if necessary.
5570 if (xfs_bmap_needs_btree(ip, whichfork)) {
5571 ASSERT(cur == NULL);
5572 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5573 &tmp_logflags, whichfork);
5574 logflags |= tmp_logflags;
5575 } else {
5576 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
5577 whichfork);
5578 }
5580 error0:
5582 * Log everything. Do this after conversion, there's no point in
5583 * logging the extent records if we've converted to btree format.
5585 if ((logflags & xfs_ilog_fext(whichfork)) &&
5586 ifp->if_format != XFS_DINODE_FMT_EXTENTS)
5587 logflags &= ~xfs_ilog_fext(whichfork);
5588 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5589 ifp->if_format != XFS_DINODE_FMT_BTREE)
5590 logflags &= ~xfs_ilog_fbroot(whichfork);
5592 * Log inode even in the error case, if the transaction
5593 * is dirty we'll need to shut down the filesystem.
5594 */
5595 if (logflags)
5596 xfs_trans_log_inode(tp, ip, logflags);
5597 if (cur) {
5598 if (!error)
5599 cur->bc_ino.allocated = 0;
5600 xfs_btree_del_cursor(cur, error);
5601 }
5602 return error;
5603 }
5605 /* Unmap a range of a file. */
5606 int
5607 xfs_bunmapi(
5608 struct xfs_trans *tp,
5609 struct xfs_inode *ip,
5610 xfs_fileoff_t bno,
5611 xfs_filblks_t len,
5612 int flags,
5613 xfs_extnum_t nexts,
5614 int *done)
5615 {
5616 int error;
5618 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
5619 *done = (len == 0);
5620 return error;
5621 }
5624 * Determine whether an extent shift can be accomplished by a merge with the
5625 * extent that precedes the target hole of the shift.
5626 */
5627 STATIC bool
5628 xfs_bmse_can_merge(
5629 struct xfs_bmbt_irec *left, /* preceding extent */
5630 struct xfs_bmbt_irec *got, /* current extent to shift */
5631 xfs_fileoff_t shift) /* shift fsb */
5633 xfs_fileoff_t startoff;
5635 startoff = got->br_startoff - shift;
5638 * The extent, once shifted, must be adjacent in-file and on-disk with
5639 * the preceding extent.
5641 if ((left->br_startoff + left->br_blockcount != startoff) ||
5642 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5643 (left->br_state != got->br_state) ||
5644 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5645 return false;
5647 return true;
5648 }
5651 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5652 * hole in the file. If an extent shift would result in the extent being fully
5653 * adjacent to the extent that currently precedes the hole, we can merge with
5654 * the preceding extent rather than do the shift.
5656 * This function assumes the caller has verified a shift-by-merge is possible
5657 * with the provided extents via xfs_bmse_can_merge().
5658 */
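/*
 * Editorial example (not from the original source): with
 * left = { br_startoff = 0, br_startblock = 100, br_blockcount = 10 }
 * and got = { br_startoff = 15, br_startblock = 110, br_blockcount = 5 },
 * a shift of 5 moves got's start offset to 10, which is contiguous with
 * left both in the file and on disk, so xfs_bmse_can_merge() allows the
 * merge and the result is a single 15 block extent.
 */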
5659 STATIC int
5660 xfs_bmse_merge(
5661 struct xfs_trans *tp,
5662 struct xfs_inode *ip,
5663 int whichfork,
5664 xfs_fileoff_t shift, /* shift fsb */
5665 struct xfs_iext_cursor *icur,
5666 struct xfs_bmbt_irec *got, /* extent to shift */
5667 struct xfs_bmbt_irec *left, /* preceding extent */
5668 struct xfs_btree_cur *cur,
5669 int *logflags) /* output */
5671 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5672 struct xfs_bmbt_irec new;
5673 xfs_filblks_t blockcount;
5674 int error, i;
5675 struct xfs_mount *mp = ip->i_mount;
5677 blockcount = left->br_blockcount + got->br_blockcount;
5679 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5680 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5681 ASSERT(xfs_bmse_can_merge(left, got, shift));
5683 new = *left;
5684 new.br_blockcount = blockcount;
5687 * Update the on-disk extent count, the btree if necessary and log the
5688 * inode.
5689 */
5690 ifp->if_nextents--;
5691 *logflags |= XFS_ILOG_CORE;
5692 if (!cur) {
5693 *logflags |= XFS_ILOG_DEXT;
5694 goto done;
5695 }
5697 /* lookup and remove the extent to merge */
5698 error = xfs_bmbt_lookup_eq(cur, got, &i);
5699 if (error)
5700 return error;
5701 if (XFS_IS_CORRUPT(mp, i != 1))
5702 return -EFSCORRUPTED;
5704 error = xfs_btree_delete(cur, &i);
5705 if (error)
5706 return error;
5707 if (XFS_IS_CORRUPT(mp, i != 1))
5708 return -EFSCORRUPTED;
5710 /* lookup and update size of the previous extent */
5711 error = xfs_bmbt_lookup_eq(cur, left, &i);
5712 if (error)
5713 return error;
5714 if (XFS_IS_CORRUPT(mp, i != 1))
5715 return -EFSCORRUPTED;
5717 error = xfs_bmbt_update(cur, &new);
5718 if (error)
5719 return error;
5721 /* change to extent format if required after extent removal */
5722 error = xfs_bmap_btree_to_extents(tp, ip, cur, logflags, whichfork);
5723 if (error)
5724 return error;
5726 done:
5727 xfs_iext_remove(ip, icur, 0);
5728 xfs_iext_prev(ifp, icur);
5729 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5730 &new);
5732 /* update reverse mapping. rmap functions merge the rmaps for us */
5733 xfs_rmap_unmap_extent(tp, ip, whichfork, got);
5734 memcpy(&new, got, sizeof(new));
5735 new.br_startoff = left->br_startoff + left->br_blockcount;
5736 xfs_rmap_map_extent(tp, ip, whichfork, &new);
5737 return 0;
5738 }
5740 static int
5741 xfs_bmap_shift_update_extent(
5742 struct xfs_trans *tp,
5743 struct xfs_inode *ip,
5744 int whichfork,
5745 struct xfs_iext_cursor *icur,
5746 struct xfs_bmbt_irec *got,
5747 struct xfs_btree_cur *cur,
5748 int *logflags,
5749 xfs_fileoff_t startoff)
5750 {
5751 struct xfs_mount *mp = ip->i_mount;
5752 struct xfs_bmbt_irec prev = *got;
5753 int error, i;
5755 *logflags |= XFS_ILOG_CORE;
5757 got->br_startoff = startoff;
5759 if (cur) {
5760 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5761 if (error)
5762 return error;
5763 if (XFS_IS_CORRUPT(mp, i != 1))
5764 return -EFSCORRUPTED;
5766 error = xfs_bmbt_update(cur, got);
5767 if (error)
5768 return error;
5769 } else {
5770 *logflags |= XFS_ILOG_DEXT;
5771 }
5773 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5774 got);
5776 /* update reverse mapping */
5777 xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
5778 xfs_rmap_map_extent(tp, ip, whichfork, got);
5780 return 0;
5781 }
5782 int
5783 xfs_bmap_collapse_extents(
5784 struct xfs_trans *tp,
5785 struct xfs_inode *ip,
5786 xfs_fileoff_t *next_fsb,
5787 xfs_fileoff_t offset_shift_fsb,
5788 bool *done)
5789 {
5790 int whichfork = XFS_DATA_FORK;
5791 struct xfs_mount *mp = ip->i_mount;
5792 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5793 struct xfs_btree_cur *cur = NULL;
5794 struct xfs_bmbt_irec got, prev;
5795 struct xfs_iext_cursor icur;
5796 xfs_fileoff_t new_startoff;
5797 int error = 0;
5798 int logflags = 0;
5800 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5801 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5802 return -EFSCORRUPTED;
5803 }
5805 if (xfs_is_shutdown(mp))
5806 return -EIO;
5808 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5810 error = xfs_iread_extents(tp, ip, whichfork);
5811 if (error)
5812 return error;
5814 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5815 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5816 cur->bc_ino.flags = 0;
5819 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5820 *done = true;
5821 goto del_cursor;
5822 }
5823 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5824 error = -EFSCORRUPTED;
5825 goto del_cursor;
5826 }
5828 new_startoff = got.br_startoff - offset_shift_fsb;
5829 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5830 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5831 error = -EINVAL;
5832 goto del_cursor;
5833 }
5835 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5836 error = xfs_bmse_merge(tp, ip, whichfork,
5837 offset_shift_fsb, &icur, &got, &prev,
5838 cur, &logflags);
5839 if (error)
5840 goto del_cursor;
5841 goto done;
5842 }
5843 } else {
5844 if (got.br_startoff < offset_shift_fsb) {
5845 error = -EINVAL;
5846 goto del_cursor;
5847 }
5848 }
5850 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5851 cur, &logflags, new_startoff);
5852 if (error)
5853 goto del_cursor;
5855 done:
5856 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5857 *done = true;
5858 goto del_cursor;
5859 }
5861 *next_fsb = got.br_startoff;
5862 del_cursor:
5863 if (cur)
5864 xfs_btree_del_cursor(cur, error);
5865 if (logflags)
5866 xfs_trans_log_inode(tp, ip, logflags);
5867 return error;
5868 }
5870 /* Make sure we won't be right-shifting an extent past the maximum bound. */
5871 int
5872 xfs_bmap_can_insert_extents(
5873 struct xfs_inode *ip,
5874 xfs_fileoff_t off,
5875 xfs_fileoff_t shift)
5876 {
5877 struct xfs_bmbt_irec got;
5878 int is_empty;
5879 int error = 0;
5881 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5883 if (xfs_is_shutdown(ip->i_mount))
5884 return -EIO;
5886 xfs_ilock(ip, XFS_ILOCK_EXCL);
5887 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5888 if (!error && !is_empty && got.br_startoff >= off &&
5889 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
5890 error = -EINVAL;
5891 xfs_iunlock(ip, XFS_ILOCK_EXCL);
5893 return error;
5894 }
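/*
 * Editorial note: br_startoff is stored in 54 bits on disk, hence the
 * BMBT_STARTOFF_MASK check above. If the last extent started at
 * BMBT_STARTOFF_MASK - 2 and shift were 4, the masked sum would wrap
 * below br_startoff, and the shift is refused with -EINVAL instead of
 * silently corrupting the mapping.
 */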
5896 int
5897 xfs_bmap_insert_extents(
5898 struct xfs_trans *tp,
5899 struct xfs_inode *ip,
5900 xfs_fileoff_t *next_fsb,
5901 xfs_fileoff_t offset_shift_fsb,
5902 bool *done,
5903 xfs_fileoff_t stop_fsb)
5904 {
5905 int whichfork = XFS_DATA_FORK;
5906 struct xfs_mount *mp = ip->i_mount;
5907 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5908 struct xfs_btree_cur *cur = NULL;
5909 struct xfs_bmbt_irec got, next;
5910 struct xfs_iext_cursor icur;
5911 xfs_fileoff_t new_startoff;
5912 int error = 0;
5913 int logflags = 0;
5915 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
5916 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
5917 return -EFSCORRUPTED;
5918 }
5920 if (xfs_is_shutdown(mp))
5921 return -EIO;
5923 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5925 error = xfs_iread_extents(tp, ip, whichfork);
5926 if (error)
5927 return error;
5929 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
5930 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5931 cur->bc_ino.flags = 0;
5934 if (*next_fsb == NULLFSBLOCK) {
5935 xfs_iext_last(ifp, &icur);
5936 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5937 stop_fsb > got.br_startoff) {
5938 *done = true;
5939 goto del_cursor;
5940 }
5941 } else {
5942 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5943 *done = true;
5944 goto del_cursor;
5945 }
5946 }
5947 if (XFS_IS_CORRUPT(mp, isnullstartblock(got.br_startblock))) {
5948 error = -EFSCORRUPTED;
5949 goto del_cursor;
5950 }
5952 if (XFS_IS_CORRUPT(mp, stop_fsb > got.br_startoff)) {
5953 error = -EFSCORRUPTED;
5954 goto del_cursor;
5955 }
5957 new_startoff = got.br_startoff + offset_shift_fsb;
5958 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5959 if (new_startoff + got.br_blockcount > next.br_startoff) {
5960 error = -EINVAL;
5961 goto del_cursor;
5962 }
5965 * Unlike a left shift (which involves a hole punch), a right
5966 * shift does not modify extent neighbors in any way. We should
5967 * never find mergeable extents in this scenario. Check anyway
5968 * and warn if we encounter two extents that could be one.
5969 */
5970 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5971 WARN_ON_ONCE(1);
5972 }
5974 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5975 cur, &logflags, new_startoff);
5976 if (error)
5977 goto del_cursor;
5979 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5980 stop_fsb >= got.br_startoff + got.br_blockcount) {
5981 *done = true;
5982 goto del_cursor;
5983 }
5985 *next_fsb = got.br_startoff;
5986 del_cursor:
5987 if (cur)
5988 xfs_btree_del_cursor(cur, error);
5989 if (logflags)
5990 xfs_trans_log_inode(tp, ip, logflags);
5991 return error;
5992 }
5995 * Splits an extent into two extents at split_fsb block such that it is the
5996 * first block of the new, second extent. @split_fsb is the block where the
5997 * extent is split. If split_fsb lies in a hole or at the first block of an
5998 * extent, just return 0.
5999 */
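/*
 * Worked example (editorial, not from the original source): splitting
 * got = { br_startoff = 10, br_startblock = 100, br_blockcount = 8 } at
 * split_fsb = 13 yields gotblkcnt = 3, shrinks got to 3 blocks, and
 * inserts new = { br_startoff = 13, br_startblock = 103,
 * br_blockcount = 5 } with the same br_state.
 */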
6000 int
6001 xfs_bmap_split_extent(
6002 struct xfs_trans *tp,
6003 struct xfs_inode *ip,
6004 xfs_fileoff_t split_fsb)
6006 int whichfork = XFS_DATA_FORK;
6007 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
6008 struct xfs_btree_cur *cur = NULL;
6009 struct xfs_bmbt_irec got;
6010 struct xfs_bmbt_irec new; /* split extent */
6011 struct xfs_mount *mp = ip->i_mount;
6012 xfs_fsblock_t gotblkcnt; /* new block count for got */
6013 struct xfs_iext_cursor icur;
6014 int error = 0;
6015 int logflags = 0;
6016 int i = 0;
6018 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
6019 XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
6020 return -EFSCORRUPTED;
6021 }
6023 if (xfs_is_shutdown(mp))
6024 return -EIO;
6026 /* Read in all the extents */
6027 error = xfs_iread_extents(tp, ip, whichfork);
6028 if (error)
6029 return error;
6032 * If there are no extents, or split_fsb lies in a hole, we are done.
6033 */
6034 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
6035 got.br_startoff >= split_fsb)
6036 return 0;
6038 gotblkcnt = split_fsb - got.br_startoff;
6039 new.br_startoff = split_fsb;
6040 new.br_startblock = got.br_startblock + gotblkcnt;
6041 new.br_blockcount = got.br_blockcount - gotblkcnt;
6042 new.br_state = got.br_state;
6044 if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
6045 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6046 cur->bc_ino.flags = 0;
6047 error = xfs_bmbt_lookup_eq(cur, &got, &i);
6048 if (error)
6049 goto del_cursor;
6050 if (XFS_IS_CORRUPT(mp, i != 1)) {
6051 error = -EFSCORRUPTED;
6052 goto del_cursor;
6053 }
6054 }
6056 got.br_blockcount = gotblkcnt;
6057 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
6058 &got);
6060 logflags = XFS_ILOG_CORE;
6061 if (cur) {
6062 error = xfs_bmbt_update(cur, &got);
6063 if (error)
6064 goto del_cursor;
6065 } else
6066 logflags |= XFS_ILOG_DEXT;
6068 /* Add new extent */
6069 xfs_iext_next(ifp, &icur);
6070 xfs_iext_insert(ip, &icur, &new, 0);
6071 ifp->if_nextents++;
6073 if (cur) {
6074 error = xfs_bmbt_lookup_eq(cur, &new, &i);
6075 if (error)
6076 goto del_cursor;
6077 if (XFS_IS_CORRUPT(mp, i != 0)) {
6078 error = -EFSCORRUPTED;
6079 goto del_cursor;
6080 }
6081 error = xfs_btree_insert(cur, &i);
6082 if (error)
6083 goto del_cursor;
6084 if (XFS_IS_CORRUPT(mp, i != 1)) {
6085 error = -EFSCORRUPTED;
6086 goto del_cursor;
6087 }
6088 }
6091 * Convert to a btree if necessary.
6093 if (xfs_bmap_needs_btree(ip, whichfork)) {
6094 int tmp_logflags; /* partial log flag return val */
6096 ASSERT(cur == NULL);
6097 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6098 &tmp_logflags, whichfork);
6099 logflags |= tmp_logflags;
6100 }
6102 del_cursor:
6103 if (cur) {
6104 cur->bc_ino.allocated = 0;
6105 xfs_btree_del_cursor(cur, error);
6106 }
6108 if (logflags)
6109 xfs_trans_log_inode(tp, ip, logflags);
6110 return error;
6111 }
6113 /* Deferred mapping is only for real extents in the data fork. */
6114 static bool
6115 xfs_bmap_is_update_needed(
6116 struct xfs_bmbt_irec *bmap)
6118 return bmap->br_startblock != HOLESTARTBLOCK &&
6119 bmap->br_startblock != DELAYSTARTBLOCK;
6120 }
6122 /* Record a bmap intent. */
6123 static void
6124 __xfs_bmap_add(
6125 struct xfs_trans *tp,
6126 enum xfs_bmap_intent_type type,
6127 struct xfs_inode *ip,
6128 int whichfork,
6129 struct xfs_bmbt_irec *bmap)
6130 {
6131 struct xfs_bmap_intent *bi;
6133 trace_xfs_bmap_defer(tp->t_mountp,
6134 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6135 type,
6136 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6137 ip->i_ino, whichfork,
6138 bmap->br_startoff,
6139 bmap->br_blockcount,
6140 bmap->br_state);
6142 bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
6143 INIT_LIST_HEAD(&bi->bi_list);
6144 bi->bi_type = type;
6145 bi->bi_owner = ip;
6146 bi->bi_whichfork = whichfork;
6147 bi->bi_bmap = *bmap;
6149 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6150 }
6153 /* Map an extent into a file. */
6154 void
6155 xfs_bmap_map_extent(
6156 struct xfs_trans *tp,
6157 struct xfs_inode *ip,
6158 struct xfs_bmbt_irec *PREV)
6159 {
6160 if (!xfs_bmap_is_update_needed(PREV))
6161 return;
6163 __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6164 }
6166 /* Unmap an extent out of a file. */
6167 void
6168 xfs_bmap_unmap_extent(
6169 struct xfs_trans *tp,
6170 struct xfs_inode *ip,
6171 struct xfs_bmbt_irec *PREV)
6172 {
6173 if (!xfs_bmap_is_update_needed(PREV))
6174 return;
6176 __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6177 }
6180 * Process one of the deferred bmap operations. We pass back the
6181 * btree cursor to maintain our lock on the bmapbt between calls.
6182 */
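/*
 * Editorial summary of the dispatch below: XFS_BMAP_MAP intents are
 * replayed via xfs_bmapi_remap(), while XFS_BMAP_UNMAP intents go through
 * __xfs_bunmapi() with XFS_BMAPI_REMAP set so the blocks are unmapped
 * without being freed.
 */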
6183 int
6184 xfs_bmap_finish_one(
6185 struct xfs_trans *tp,
6186 struct xfs_inode *ip,
6187 enum xfs_bmap_intent_type type,
6188 int whichfork,
6189 xfs_fileoff_t startoff,
6190 xfs_fsblock_t startblock,
6191 xfs_filblks_t *blockcount,
6192 xfs_exntst_t state)
6193 {
6194 int error = 0;
6196 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6198 trace_xfs_bmap_deferred(tp->t_mountp,
6199 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6200 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6201 ip->i_ino, whichfork, startoff, *blockcount, state);
6203 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6204 return -EFSCORRUPTED;
6206 if (XFS_TEST_ERROR(false, tp->t_mountp,
6207 XFS_ERRTAG_BMAP_FINISH_ONE))
6208 return -EIO;
6210 switch (type) {
6211 case XFS_BMAP_MAP:
6212 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6213 startblock, 0);
6214 *blockcount = 0;
6215 break;
6216 case XFS_BMAP_UNMAP:
6217 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6218 XFS_BMAPI_REMAP, 1);
6219 break;
6220 default:
6221 ASSERT(0);
6222 error = -EFSCORRUPTED;
6223 }
6225 return error;
6226 }
6228 /* Check that an inode's extent does not have invalid flags or bad ranges. */
6229 xfs_failaddr_t
6230 xfs_bmap_validate_extent(
6231 struct xfs_inode *ip,
6232 int whichfork,
6233 struct xfs_bmbt_irec *irec)
6234 {
6235 struct xfs_mount *mp = ip->i_mount;
6237 if (!xfs_verify_fileext(mp, irec->br_startoff, irec->br_blockcount))
6238 return __this_address;
6240 if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
6241 if (!xfs_verify_rtext(mp, irec->br_startblock,
6242 irec->br_blockcount))
6243 return __this_address;
6244 } else {
6245 if (!xfs_verify_fsbext(mp, irec->br_startblock,
6246 irec->br_blockcount))
6247 return __this_address;
6248 }
6249 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6250 return __this_address;
6252 return NULL;
6253 }
6254 int
6255 xfs_bmap_intent_init_cache(void)
6256 {
6257 xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
6258 sizeof(struct xfs_bmap_intent),
6259 0, 0, NULL);
6261 return xfs_bmap_intent_cache != NULL ? 0 : -ENOMEM;
6262 }
6264 void
6265 xfs_bmap_intent_destroy_cache(void)
6267 kmem_cache_destroy(xfs_bmap_intent_cache);
6268 xfs_bmap_intent_cache = NULL;
6269 }