2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
31 #include "xfs_inode.h"
32 #include "xfs_btree.h"
33 #include "xfs_trans.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_alloc.h"
38 #include "xfs_bmap_util.h"
39 #include "xfs_bmap_btree.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_errortag.h"
42 #include "xfs_error.h"
43 #include "xfs_quota.h"
44 #include "xfs_trans_space.h"
45 #include "xfs_buf_item.h"
46 #include "xfs_trace.h"
47 #include "xfs_symlink.h"
48 #include "xfs_attr_leaf.h"
49 #include "xfs_filestream.h"
51 #include "xfs_ag_resv.h"
52 #include "xfs_refcount.h"
53 #include "xfs_icache.h"
56 kmem_zone_t *xfs_bmap_free_item_zone;
59 * Miscellaneous helper functions
63 * Compute and fill in the value of the maximum depth of a bmap btree
64 * in this filesystem. Done once, during mount.
67 xfs_bmap_compute_maxlevels(
68 xfs_mount_t *mp, /* file system mount structure */
69 int whichfork) /* data or attr fork */
71 int level; /* btree level */
72 uint maxblocks; /* max blocks at this level */
73 uint maxleafents; /* max leaf entries possible */
74 int maxrootrecs; /* max records in root block */
75 int minleafrecs; /* min records in leaf block */
76 int minnoderecs; /* min records in node block */
77 int sz; /* root block size */
80 * The maximum number of extents in a file, hence the maximum
81 * number of leaf entries, is controlled by the type of di_nextents
82 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
83 * (a signed 16-bit number, xfs_aextnum_t).
85 * Note that we can no longer assume that if we are in ATTR1 that
86 * the fork offset of all the inodes will be
87 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
88 * with ATTR2 and then mounted back with ATTR1, keeping the
89 * di_forkoff's fixed but probably at various positions. Therefore,
90 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
91 * of a minimum size available.
93 if (whichfork == XFS_DATA_FORK) {
94 maxleafents = MAXEXTNUM;
95 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
97 maxleafents = MAXAEXTNUM;
98 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
100 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
101 minleafrecs = mp->m_bmap_dmnr[0];
102 minnoderecs = mp->m_bmap_dmnr[1];
103 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
104 for (level = 1; maxblocks > 1; level++) {
105 if (maxblocks <= maxrootrecs)
108 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
110 mp->m_bm_maxlevels[whichfork] = level;
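/*
 * Illustrative walk-through of the computation above (the per-block record
 * counts are assumptions for the example, not values from this file):
 * with minleafrecs = minnoderecs = 125 and maxrootrecs = 9, the data fork's
 * MAXEXTNUM (2^31 - 1) leaf entries need roughly 17.2M leaf blocks; dividing
 * by 125 per level gives ~137k blocks, then ~1100, then 9, which fit under
 * the root, so m_bm_maxlevels[XFS_DATA_FORK] would come out as 5.
 */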
113 STATIC int /* error */
115 struct xfs_btree_cur *cur,
116 struct xfs_bmbt_irec *irec,
117 int *stat) /* success/failure */
119 cur->bc_rec.b = *irec;
120 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
123 STATIC int /* error */
124 xfs_bmbt_lookup_first(
125 struct xfs_btree_cur *cur,
126 int *stat) /* success/failure */
128 cur->bc_rec.b.br_startoff = 0;
129 cur->bc_rec.b.br_startblock = 0;
130 cur->bc_rec.b.br_blockcount = 0;
131 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
135 * Check if the inode needs to be converted to btree format.
137 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
139 return whichfork != XFS_COW_FORK &&
140 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
141 XFS_IFORK_NEXTENTS(ip, whichfork) >
142 XFS_IFORK_MAXEXT(ip, whichfork);
146 * Check if the inode should be converted to extent format.
148 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
150 return whichfork != XFS_COW_FORK &&
151 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
152 XFS_IFORK_NEXTENTS(ip, whichfork) <=
153 XFS_IFORK_MAXEXT(ip, whichfork);
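/*
 * Illustrative note: XFS_IFORK_MAXEXT() is the number of 16-byte bmbt extent
 * records that fit in the inode fork area, so a 160-byte fork area (an
 * example figure) holds at most 10 records. Growing an extents-format fork
 * past that count makes xfs_bmap_needs_btree() true; shrinking a btree-format
 * fork back to that count or fewer makes xfs_bmap_wants_extents() true.
 */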
157 * Update the record referred to by cur to the value given by irec
158 * This either works (return 0) or gets an EFSCORRUPTED error.
162 struct xfs_btree_cur *cur,
163 struct xfs_bmbt_irec *irec)
165 union xfs_btree_rec rec;
167 xfs_bmbt_disk_set_all(&rec.bmbt, irec);
168 return xfs_btree_update(cur, &rec);
172 * Compute the worst-case number of indirect blocks that will be used
173 * for ip's delayed extent of length "len".
176 xfs_bmap_worst_indlen(
177 xfs_inode_t *ip, /* incore inode pointer */
178 xfs_filblks_t len) /* delayed extent length */
180 int level; /* btree level number */
181 int maxrecs; /* maximum record count at this level */
182 xfs_mount_t *mp; /* mount structure */
183 xfs_filblks_t rval; /* return value */
186 maxrecs = mp->m_bmap_dmxr[0];
187 for (level = 0, rval = 0;
188 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
191 do_div(len, maxrecs);
194 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
197 maxrecs = mp->m_bmap_dmxr[1];
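/*
 * Worked example (illustrative; the record count and tree height are
 * assumptions, not values from this file): for a 100,000-block delayed
 * extent with roughly 250 bmbt records per block and a 5-level maximum
 * tree, the worst case works out to about 400 leaf blocks, a couple of
 * level-1 node blocks, and one block for each possible level above that,
 * i.e. roughly 405 blocks of indirect reservation.
 */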
203 * Calculate the default attribute fork offset for newly created inodes.
206 xfs_default_attroffset(
207 struct xfs_inode *ip)
209 struct xfs_mount *mp = ip->i_mount;
212 if (mp->m_sb.sb_inodesize == 256) {
213 offset = XFS_LITINO(mp, ip->i_d.di_version) -
214 XFS_BMDR_SPACE_CALC(MINABTPTRS);
216 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
219 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
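/*
 * Note, derived from the callers below (xfs_bmap_forkoff_reset() and
 * xfs_bmap_add_attrfork()): the offset computed here is a byte offset into
 * the inode literal area, while di_forkoff is stored in 8-byte units, which
 * is why the callers shift the result right by 3 before storing it.
 */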
224 * Helper routine to reset inode di_forkoff field when switching
225 * attribute fork from local to extent format - we reset it where
226 * possible to make space available for inline data fork extents.
229 xfs_bmap_forkoff_reset(
233 if (whichfork == XFS_ATTR_FORK &&
234 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
235 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
236 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
238 if (dfl_forkoff > ip->i_d.di_forkoff)
239 ip->i_d.di_forkoff = dfl_forkoff;
244 STATIC struct xfs_buf *
246 struct xfs_btree_cur *cur,
249 struct xfs_log_item_desc *lidp;
255 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
256 if (!cur->bc_bufs[i])
258 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
259 return cur->bc_bufs[i];
262 /* Chase down all the log items to see if the bp is there */
263 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
264 struct xfs_buf_log_item *bip;
265 bip = (struct xfs_buf_log_item *)lidp->lid_item;
266 if (bip->bli_item.li_type == XFS_LI_BUF &&
267 XFS_BUF_ADDR(bip->bli_buf) == bno)
276 struct xfs_btree_block *block,
282 __be64 *pp, *thispa; /* pointer to block address */
283 xfs_bmbt_key_t *prevp, *keyp;
285 ASSERT(be16_to_cpu(block->bb_level) > 0);
288 for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
289 dmxr = mp->m_bmap_dmxr[0];
290 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
293 ASSERT(be64_to_cpu(prevp->br_startoff) <
294 be64_to_cpu(keyp->br_startoff));
299 * Compare the block numbers to see if there are dups.
302 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
304 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
306 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
308 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
310 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
311 if (*thispa == *pp) {
312 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
314 (unsigned long long)be64_to_cpu(*thispa));
315 panic("%s: ptrs are equal in node\n",
323 * Check that the extents for the inode ip are in the right order in all
324 * btree leaves. This becomes prohibitively expensive for large extent count
325 * files, so don't bother with inodes that have more than 10,000 extents in
326 * them. The btree record ordering checks will still be done, so for such large
327 * bmapbt constructs that is going to catch most corruptions.
330 xfs_bmap_check_leaf_extents(
331 xfs_btree_cur_t *cur, /* btree cursor or null */
332 xfs_inode_t *ip, /* incore inode pointer */
333 int whichfork) /* data or attr fork */
335 struct xfs_btree_block *block; /* current btree block */
336 xfs_fsblock_t bno; /* block # of "block" */
337 xfs_buf_t *bp; /* buffer for "block" */
338 int error; /* error return value */
339 xfs_extnum_t i=0, j; /* index into the extents list */
340 xfs_ifork_t *ifp; /* fork structure */
341 int level; /* btree level, for checking */
342 xfs_mount_t *mp; /* file system mount structure */
343 __be64 *pp; /* pointer to block address */
344 xfs_bmbt_rec_t *ep; /* pointer to current extent */
345 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
346 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
349 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
353 /* skip large extent count inodes */
354 if (ip->i_d.di_nextents > 10000)
359 ifp = XFS_IFORK_PTR(ip, whichfork);
360 block = ifp->if_broot;
362 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
364 level = be16_to_cpu(block->bb_level);
366 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
367 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
368 bno = be64_to_cpu(*pp);
370 ASSERT(bno != NULLFSBLOCK);
371 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
372 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
375 * Go down the tree until leaf level is reached, following the first
376 * pointer (leftmost) at each level.
378 while (level-- > 0) {
379 /* See if buf is in cur first */
381 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
384 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
390 block = XFS_BUF_TO_BLOCK(bp);
395 * Check this block for basic sanity (increasing keys and
396 * no duplicate blocks).
399 xfs_check_block(block, mp, 0, 0);
400 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
401 bno = be64_to_cpu(*pp);
402 XFS_WANT_CORRUPTED_GOTO(mp,
403 xfs_verify_fsbno(mp, bno), error0);
406 xfs_trans_brelse(NULL, bp);
411 * Here with bp and block set to the leftmost leaf node in the tree.
416 * Loop over all leaf nodes checking that all extents are in the right order.
419 xfs_fsblock_t nextbno;
420 xfs_extnum_t num_recs;
423 num_recs = xfs_btree_get_numrecs(block);
426 * Read-ahead the next leaf block, if any.
429 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
432 * Check all the extents to make sure they are OK.
433 * If we had a previous block, the last entry should
434 * conform with the first entry in this one.
437 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
439 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
440 xfs_bmbt_disk_get_blockcount(&last) <=
441 xfs_bmbt_disk_get_startoff(ep));
443 for (j = 1; j < num_recs; j++) {
444 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
445 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
446 xfs_bmbt_disk_get_blockcount(ep) <=
447 xfs_bmbt_disk_get_startoff(nextp));
455 xfs_trans_brelse(NULL, bp);
459 * If we've reached the end, stop.
461 if (bno == NULLFSBLOCK)
465 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
468 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
474 block = XFS_BUF_TO_BLOCK(bp);
480 xfs_warn(mp, "%s: at error0", __func__);
482 xfs_trans_brelse(NULL, bp);
484 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
486 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
491 * Validate that the bmbt_irecs being returned from bmapi are valid
492 * given the caller's original parameters. Specifically check the
493 * ranges of the returned irecs to ensure that they only extend beyond
494 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
497 xfs_bmap_validate_ret(
501 xfs_bmbt_irec_t *mval,
505 int i; /* index to map values */
507 ASSERT(ret_nmap <= nmap);
509 for (i = 0; i < ret_nmap; i++) {
510 ASSERT(mval[i].br_blockcount > 0);
511 if (!(flags & XFS_BMAPI_ENTIRE)) {
512 ASSERT(mval[i].br_startoff >= bno);
513 ASSERT(mval[i].br_blockcount <= len);
514 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
517 ASSERT(mval[i].br_startoff < bno + len);
518 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
522 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
523 mval[i].br_startoff);
524 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
525 mval[i].br_startblock != HOLESTARTBLOCK);
526 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
527 mval[i].br_state == XFS_EXT_UNWRITTEN);
532 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
533 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
537 * bmap free list manipulation functions
541 * Add the extent to the list of extents to be freed at transaction end.
542 * The list is maintained sorted (by block number).
546 struct xfs_mount *mp,
547 struct xfs_defer_ops *dfops,
550 struct xfs_owner_info *oinfo)
552 struct xfs_extent_free_item *new; /* new element */
557 ASSERT(bno != NULLFSBLOCK);
559 ASSERT(len <= MAXEXTLEN);
560 ASSERT(!isnullstartblock(bno));
561 agno = XFS_FSB_TO_AGNO(mp, bno);
562 agbno = XFS_FSB_TO_AGBNO(mp, bno);
563 ASSERT(agno < mp->m_sb.sb_agcount);
564 ASSERT(agbno < mp->m_sb.sb_agblocks);
565 ASSERT(len < mp->m_sb.sb_agblocks);
566 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
568 ASSERT(xfs_bmap_free_item_zone != NULL);
570 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
571 new->xefi_startblock = bno;
572 new->xefi_blockcount = (xfs_extlen_t)len;
574 new->xefi_oinfo = *oinfo;
576 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
577 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
578 XFS_FSB_TO_AGBNO(mp, bno), len);
579 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
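/*
 * Usage note (see xfs_bmap_btree_to_extents() below for an example): callers
 * queue a no-longer-needed block here along with its owner info; the block is
 * only physically freed once the deferred ops list is processed, e.g. by the
 * xfs_defer_finish() call in xfs_bmap_add_attrfork().
 */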
583 * Inode fork format manipulation functions
587 * Transform a btree format file with only one leaf node, where the
588 * extents list will fit in the inode, into an extents format file.
589 * Since the file extents are already in-core, all we have to do is
590 * give up the space for the btree root and pitch the leaf block.
592 STATIC int /* error */
593 xfs_bmap_btree_to_extents(
594 xfs_trans_t *tp, /* transaction pointer */
595 xfs_inode_t *ip, /* incore inode pointer */
596 xfs_btree_cur_t *cur, /* btree cursor */
597 int *logflagsp, /* inode logging flags */
598 int whichfork) /* data or attr fork */
601 struct xfs_btree_block *cblock;/* child btree block */
602 xfs_fsblock_t cbno; /* child block number */
603 xfs_buf_t *cbp; /* child block's buffer */
604 int error; /* error return value */
605 xfs_ifork_t *ifp; /* inode fork data */
606 xfs_mount_t *mp; /* mount point structure */
607 __be64 *pp; /* ptr to block address */
608 struct xfs_btree_block *rblock;/* root btree block */
609 struct xfs_owner_info oinfo;
612 ifp = XFS_IFORK_PTR(ip, whichfork);
613 ASSERT(whichfork != XFS_COW_FORK);
614 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
615 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
616 rblock = ifp->if_broot;
617 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
618 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
619 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
620 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
621 cbno = be64_to_cpu(*pp);
624 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
625 xfs_btree_check_lptr(cur, cbno, 1));
627 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
631 cblock = XFS_BUF_TO_BLOCK(cbp);
632 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
634 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
635 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
636 ip->i_d.di_nblocks--;
637 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
638 xfs_trans_binval(tp, cbp);
639 if (cur->bc_bufs[0] == cbp)
640 cur->bc_bufs[0] = NULL;
641 xfs_iroot_realloc(ip, -1, whichfork);
642 ASSERT(ifp->if_broot == NULL);
643 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
644 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
645 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
650 * Convert an extents-format file into a btree-format file.
651 * The new file will have a root block (in the inode) and a single child block.
653 STATIC int /* error */
654 xfs_bmap_extents_to_btree(
655 xfs_trans_t *tp, /* transaction pointer */
656 xfs_inode_t *ip, /* incore inode pointer */
657 xfs_fsblock_t *firstblock, /* first-block-allocated */
658 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
659 xfs_btree_cur_t **curp, /* cursor returned to caller */
660 int wasdel, /* converting a delayed alloc */
661 int *logflagsp, /* inode logging flags */
662 int whichfork) /* data or attr fork */
664 struct xfs_btree_block *ablock; /* allocated (child) bt block */
665 xfs_buf_t *abp; /* buffer for ablock */
666 xfs_alloc_arg_t args; /* allocation arguments */
667 xfs_bmbt_rec_t *arp; /* child record pointer */
668 struct xfs_btree_block *block; /* btree root block */
669 xfs_btree_cur_t *cur; /* bmap btree cursor */
670 int error; /* error return value */
671 xfs_ifork_t *ifp; /* inode fork pointer */
672 xfs_bmbt_key_t *kp; /* root block key pointer */
673 xfs_mount_t *mp; /* mount structure */
674 xfs_bmbt_ptr_t *pp; /* root block address pointer */
675 struct xfs_iext_cursor icur;
676 struct xfs_bmbt_irec rec;
677 xfs_extnum_t cnt = 0;
680 ASSERT(whichfork != XFS_COW_FORK);
681 ifp = XFS_IFORK_PTR(ip, whichfork);
682 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
685 * Make space in the inode incore.
687 xfs_iroot_realloc(ip, 1, whichfork);
688 ifp->if_flags |= XFS_IFBROOT;
693 block = ifp->if_broot;
694 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
695 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
696 XFS_BTREE_LONG_PTRS);
698 * Need a cursor. Can't allocate until bb_level is filled in.
700 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
701 cur->bc_private.b.firstblock = *firstblock;
702 cur->bc_private.b.dfops = dfops;
703 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
705 * Convert to a btree with two levels, one record in root.
707 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
708 memset(&args, 0, sizeof(args));
711 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
712 args.firstblock = *firstblock;
713 if (*firstblock == NULLFSBLOCK) {
714 args.type = XFS_ALLOCTYPE_START_BNO;
715 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
716 } else if (dfops->dop_low) {
717 args.type = XFS_ALLOCTYPE_START_BNO;
718 args.fsbno = *firstblock;
720 args.type = XFS_ALLOCTYPE_NEAR_BNO;
721 args.fsbno = *firstblock;
723 args.minlen = args.maxlen = args.prod = 1;
724 args.wasdel = wasdel;
726 if ((error = xfs_alloc_vextent(&args))) {
727 xfs_iroot_realloc(ip, -1, whichfork);
728 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
732 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
733 xfs_iroot_realloc(ip, -1, whichfork);
734 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
738 * Allocation can't fail, the space was reserved.
740 ASSERT(*firstblock == NULLFSBLOCK ||
741 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
742 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
743 cur->bc_private.b.allocated++;
744 ip->i_d.di_nblocks++;
745 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
746 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
748 * Fill in the child block.
750 abp->b_ops = &xfs_bmbt_buf_ops;
751 ablock = XFS_BUF_TO_BLOCK(abp);
752 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
753 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
754 XFS_BTREE_LONG_PTRS);
756 for_each_xfs_iext(ifp, &icur, &rec) {
757 if (isnullstartblock(rec.br_startblock))
759 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
760 xfs_bmbt_disk_set_all(arp, &rec);
763 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
764 xfs_btree_set_numrecs(ablock, cnt);
767 * Fill in the root key and pointer.
769 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
770 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
771 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
772 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
773 be16_to_cpu(block->bb_level)));
774 *pp = cpu_to_be64(args.fsbno);
777 * Do all this logging at the end so that
778 * the root is at the right level.
780 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
781 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
782 ASSERT(*curp == NULL);
784 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
789 * Convert a local file to an extents file.
790 * This code is out of bounds for data forks of regular files,
791 * since the file data needs to get logged so things will stay consistent.
792 * (The bmap-level manipulations are ok, though).
795 xfs_bmap_local_to_extents_empty(
796 struct xfs_inode *ip,
799 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
801 ASSERT(whichfork != XFS_COW_FORK);
802 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
803 ASSERT(ifp->if_bytes == 0);
804 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
806 xfs_bmap_forkoff_reset(ip, whichfork);
807 ifp->if_flags &= ~XFS_IFINLINE;
808 ifp->if_flags |= XFS_IFEXTENTS;
809 ifp->if_u1.if_root = NULL;
811 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
815 STATIC int /* error */
816 xfs_bmap_local_to_extents(
817 xfs_trans_t *tp, /* transaction pointer */
818 xfs_inode_t *ip, /* incore inode pointer */
819 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
820 xfs_extlen_t total, /* total blocks needed by transaction */
821 int *logflagsp, /* inode logging flags */
823 void (*init_fn)(struct xfs_trans *tp,
825 struct xfs_inode *ip,
826 struct xfs_ifork *ifp))
829 int flags; /* logging flags returned */
830 xfs_ifork_t *ifp; /* inode fork pointer */
831 xfs_alloc_arg_t args; /* allocation arguments */
832 xfs_buf_t *bp; /* buffer for extent block */
833 struct xfs_bmbt_irec rec;
834 struct xfs_iext_cursor icur;
837 * We don't want to deal with the case of keeping inode data inline yet.
838 * So passing in the data fork of a regular inode is invalid.
840 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
841 ifp = XFS_IFORK_PTR(ip, whichfork);
842 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
844 if (!ifp->if_bytes) {
845 xfs_bmap_local_to_extents_empty(ip, whichfork);
846 flags = XFS_ILOG_CORE;
852 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
853 memset(&args, 0, sizeof(args));
855 args.mp = ip->i_mount;
856 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
857 args.firstblock = *firstblock;
859 * Allocate a block. We know we need only one, since the
860 * file currently fits in an inode.
862 if (*firstblock == NULLFSBLOCK) {
863 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
864 args.type = XFS_ALLOCTYPE_START_BNO;
866 args.fsbno = *firstblock;
867 args.type = XFS_ALLOCTYPE_NEAR_BNO;
870 args.minlen = args.maxlen = args.prod = 1;
871 error = xfs_alloc_vextent(&args);
875 /* Can't fail, the space was reserved. */
876 ASSERT(args.fsbno != NULLFSBLOCK);
877 ASSERT(args.len == 1);
878 *firstblock = args.fsbno;
879 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
882 * Initialize the block, copy the data and log the remote buffer.
884 * The callout is responsible for logging because the remote format
885 * might differ from the local format and thus we don't know how much to
886 * log here. Note that init_fn must also set the buffer log item type
889 init_fn(tp, bp, ip, ifp);
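/*
 * For a concrete init_fn, see xfs_bmap_add_attrfork_local() below, which
 * passes xfs_symlink_local_to_remote() for symlinks; per the comment above,
 * the callout copies the inline data into the new block and logs the buffer
 * in its remote (on-disk) format.
 */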
891 /* account for the change in fork size */
892 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
893 xfs_bmap_local_to_extents_empty(ip, whichfork);
894 flags |= XFS_ILOG_CORE;
896 ifp->if_u1.if_root = NULL;
900 rec.br_startblock = args.fsbno;
901 rec.br_blockcount = 1;
902 rec.br_state = XFS_EXT_NORM;
903 xfs_iext_first(ifp, &icur);
904 xfs_iext_insert(ip, &icur, &rec, 0);
906 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
907 ip->i_d.di_nblocks = 1;
908 xfs_trans_mod_dquot_byino(tp, ip,
909 XFS_TRANS_DQ_BCOUNT, 1L);
910 flags |= xfs_ilog_fext(whichfork);
918 * Called from xfs_bmap_add_attrfork to handle btree format files.
920 STATIC int /* error */
921 xfs_bmap_add_attrfork_btree(
922 xfs_trans_t *tp, /* transaction pointer */
923 xfs_inode_t *ip, /* incore inode pointer */
924 xfs_fsblock_t *firstblock, /* first block allocated */
925 struct xfs_defer_ops *dfops, /* blocks to free at commit */
926 int *flags) /* inode logging flags */
928 xfs_btree_cur_t *cur; /* btree cursor */
929 int error; /* error return value */
930 xfs_mount_t *mp; /* file system mount struct */
931 int stat; /* newroot status */
934 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
935 *flags |= XFS_ILOG_DBROOT;
937 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
938 cur->bc_private.b.dfops = dfops;
939 cur->bc_private.b.firstblock = *firstblock;
940 error = xfs_bmbt_lookup_first(cur, &stat);
943 /* must be at least one entry */
944 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
945 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
948 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
951 *firstblock = cur->bc_private.b.firstblock;
952 cur->bc_private.b.allocated = 0;
953 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
957 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
962 * Called from xfs_bmap_add_attrfork to handle extents format files.
964 STATIC int /* error */
965 xfs_bmap_add_attrfork_extents(
966 xfs_trans_t *tp, /* transaction pointer */
967 xfs_inode_t *ip, /* incore inode pointer */
968 xfs_fsblock_t *firstblock, /* first block allocated */
969 struct xfs_defer_ops *dfops, /* blocks to free at commit */
970 int *flags) /* inode logging flags */
972 xfs_btree_cur_t *cur; /* bmap btree cursor */
973 int error; /* error return value */
975 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
978 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
979 flags, XFS_DATA_FORK);
981 cur->bc_private.b.allocated = 0;
982 xfs_btree_del_cursor(cur,
983 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
989 * Called from xfs_bmap_add_attrfork to handle local format files. Each
990 * different data fork content type needs a different callout to do the
991 * conversion. Some are basic and only require special block initialisation
992 * callouts for the data formatting; others (directories) are so specialised they
993 * handle everything themselves.
995 * XXX (dgc): investigate whether directory conversion can use the generic
996 * formatting callout. It should be possible - it's just a very complex
999 STATIC int /* error */
1000 xfs_bmap_add_attrfork_local(
1001 xfs_trans_t *tp, /* transaction pointer */
1002 xfs_inode_t *ip, /* incore inode pointer */
1003 xfs_fsblock_t *firstblock, /* first block allocated */
1004 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1005 int *flags) /* inode logging flags */
1007 xfs_da_args_t dargs; /* args for dir/attr code */
1009 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1012 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1013 memset(&dargs, 0, sizeof(dargs));
1014 dargs.geo = ip->i_mount->m_dir_geo;
1016 dargs.firstblock = firstblock;
1017 dargs.dfops = dfops;
1018 dargs.total = dargs.geo->fsbcount;
1019 dargs.whichfork = XFS_DATA_FORK;
1021 return xfs_dir2_sf_to_block(&dargs);
1024 if (S_ISLNK(VFS_I(ip)->i_mode))
1025 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1026 flags, XFS_DATA_FORK,
1027 xfs_symlink_local_to_remote);
1029 /* should only be called for types that support local format data */
1031 return -EFSCORRUPTED;
1035 * Convert inode from non-attributed to attributed.
1036 * Must not be in a transaction, ip must not be locked.
1038 int /* error code */
1039 xfs_bmap_add_attrfork(
1040 xfs_inode_t *ip, /* incore inode pointer */
1041 int size, /* space new attribute needs */
1042 int rsvd) /* xact may use reserved blks */
1044 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1045 struct xfs_defer_ops dfops; /* freed extent records */
1046 xfs_mount_t *mp; /* mount structure */
1047 xfs_trans_t *tp; /* transaction pointer */
1048 int blks; /* space reservation */
1049 int version = 1; /* superblock attr version */
1050 int logflags; /* logging flags */
1051 int error; /* error return value */
1053 ASSERT(XFS_IFORK_Q(ip) == 0);
1056 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1058 blks = XFS_ADDAFORK_SPACE_RES(mp);
1060 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1061 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1065 xfs_ilock(ip, XFS_ILOCK_EXCL);
1066 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1067 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1068 XFS_QMOPT_RES_REGBLKS);
1071 if (XFS_IFORK_Q(ip))
1073 if (ip->i_d.di_anextents != 0) {
1074 error = -EFSCORRUPTED;
1077 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1079 * For inodes coming from pre-6.2 filesystems.
1081 ASSERT(ip->i_d.di_aformat == 0);
1082 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1085 xfs_trans_ijoin(tp, ip, 0);
1086 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1088 switch (ip->i_d.di_format) {
1089 case XFS_DINODE_FMT_DEV:
1090 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1092 case XFS_DINODE_FMT_LOCAL:
1093 case XFS_DINODE_FMT_EXTENTS:
1094 case XFS_DINODE_FMT_BTREE:
1095 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1096 if (!ip->i_d.di_forkoff)
1097 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1098 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1107 ASSERT(ip->i_afp == NULL);
1108 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1109 ip->i_afp->if_flags = XFS_IFEXTENTS;
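/*
 * Illustrative note on the di_forkoff switch above: di_forkoff counts 8-byte
 * units, so for the DEV case roundup(sizeof(xfs_dev_t), 8) >> 3 evaluates to
 * 1 (assuming the usual 4-byte xfs_dev_t), i.e. the attribute fork starts
 * 8 bytes into the inode literal area.
 */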
1111 xfs_defer_init(&dfops, &firstblock);
1112 switch (ip->i_d.di_format) {
1113 case XFS_DINODE_FMT_LOCAL:
1114 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1117 case XFS_DINODE_FMT_EXTENTS:
1118 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1121 case XFS_DINODE_FMT_BTREE:
1122 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1130 xfs_trans_log_inode(tp, ip, logflags);
1133 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1134 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1135 bool log_sb = false;
1137 spin_lock(&mp->m_sb_lock);
1138 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1139 xfs_sb_version_addattr(&mp->m_sb);
1142 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1143 xfs_sb_version_addattr2(&mp->m_sb);
1146 spin_unlock(&mp->m_sb_lock);
1151 error = xfs_defer_finish(&tp, &dfops);
1154 error = xfs_trans_commit(tp);
1155 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1159 xfs_defer_cancel(&dfops);
1161 xfs_trans_cancel(tp);
1162 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1167 * Internal and external extent tree search functions.
1171 * Read in extents from a btree-format inode.
1175 struct xfs_trans *tp,
1176 struct xfs_inode *ip,
1179 struct xfs_mount *mp = ip->i_mount;
1180 int state = xfs_bmap_fork_to_state(whichfork);
1181 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1182 xfs_extnum_t nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1183 struct xfs_btree_block *block = ifp->if_broot;
1184 struct xfs_iext_cursor icur;
1185 struct xfs_bmbt_irec new;
1193 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1195 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1196 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
1197 return -EFSCORRUPTED;
1201 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1203 level = be16_to_cpu(block->bb_level);
1205 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1206 bno = be64_to_cpu(*pp);
1209 * Go down the tree until leaf level is reached, following the first
1210 * pointer (leftmost) at each level.
1212 while (level-- > 0) {
1213 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1214 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1217 block = XFS_BUF_TO_BLOCK(bp);
1220 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1221 bno = be64_to_cpu(*pp);
1222 XFS_WANT_CORRUPTED_GOTO(mp,
1223 xfs_verify_fsbno(mp, bno), out_brelse);
1224 xfs_trans_brelse(tp, bp);
1228 * Here with bp and block set to the leftmost leaf node in the tree.
1231 xfs_iext_first(ifp, &icur);
1234 * Loop over all leaf nodes. Copy information to the extent records.
1237 xfs_bmbt_rec_t *frp;
1238 xfs_fsblock_t nextbno;
1239 xfs_extnum_t num_recs;
1241 num_recs = xfs_btree_get_numrecs(block);
1242 if (unlikely(i + num_recs > nextents)) {
1243 ASSERT(i + num_recs <= nextents);
1244 xfs_warn(ip->i_mount,
1245 "corrupt dinode %Lu, (btree extents).",
1246 (unsigned long long) ip->i_ino);
1247 xfs_inode_verifier_error(ip, -EFSCORRUPTED,
1248 __func__, block, sizeof(*block),
1250 error = -EFSCORRUPTED;
1254 * Read-ahead the next leaf block, if any.
1256 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1257 if (nextbno != NULLFSBLOCK)
1258 xfs_btree_reada_bufl(mp, nextbno, 1,
1261 * Copy records into the extent records.
1263 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1264 for (j = 0; j < num_recs; j++, frp++, i++) {
1267 xfs_bmbt_disk_get_all(frp, &new);
1268 fa = xfs_bmap_validate_extent(ip, whichfork, &new);
1270 error = -EFSCORRUPTED;
1271 xfs_inode_verifier_error(ip, error,
1272 "xfs_iread_extents(2)",
1273 frp, sizeof(*frp), fa);
1276 xfs_iext_insert(ip, &icur, &new, state);
1277 trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
1278 xfs_iext_next(ifp, &icur);
1280 xfs_trans_brelse(tp, bp);
1283 * If we've reached the end, stop.
1285 if (bno == NULLFSBLOCK)
1287 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1288 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1291 block = XFS_BUF_TO_BLOCK(bp);
1294 if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
1295 error = -EFSCORRUPTED;
1298 ASSERT(i == xfs_iext_count(ifp));
1300 ifp->if_flags |= XFS_IFEXTENTS;
1304 xfs_trans_brelse(tp, bp);
1306 xfs_iext_destroy(ifp);
1311 * Returns the relative block number of the first unused block(s) in the given
1312 * fork with at least "len" logically contiguous blocks free. This is the
1313 * lowest-address hole if the fork has holes, else the first block past the end
1314 * of the fork. Return 0 if the fork is currently local (in-inode).
1317 xfs_bmap_first_unused(
1318 struct xfs_trans *tp, /* transaction pointer */
1319 struct xfs_inode *ip, /* incore inode */
1320 xfs_extlen_t len, /* size of hole to find */
1321 xfs_fileoff_t *first_unused, /* unused block */
1322 int whichfork) /* data or attr fork */
1324 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1325 struct xfs_bmbt_irec got;
1326 struct xfs_iext_cursor icur;
1327 xfs_fileoff_t lastaddr = 0;
1328 xfs_fileoff_t lowest, max;
1331 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1332 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1333 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1335 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1340 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1341 error = xfs_iread_extents(tp, ip, whichfork);
1346 lowest = max = *first_unused;
1347 for_each_xfs_iext(ifp, &icur, &got) {
1349 * See if the hole before this extent will work.
1351 if (got.br_startoff >= lowest + len &&
1352 got.br_startoff - max >= len)
1354 lastaddr = got.br_startoff + got.br_blockcount;
1355 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1358 *first_unused = max;
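/*
 * Worked example (illustrative): starting from *first_unused == 0 with
 * len == 8 and extents covering [0, 10) and [15, 25), the 5-block hole at
 * [10, 15) is too small, so the loop runs past the last extent and
 * *first_unused ends up as 25.
 */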
1363 * Returns the file-relative block number of the last block - 1 before
1364 * last_block (input value) in the file.
1365 * This is not based on i_size, it is based on the extent records.
1366 * Returns 0 for local files, as they do not have extent records.
1369 xfs_bmap_last_before(
1370 struct xfs_trans *tp, /* transaction pointer */
1371 struct xfs_inode *ip, /* incore inode */
1372 xfs_fileoff_t *last_block, /* last block */
1373 int whichfork) /* data or attr fork */
1375 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1376 struct xfs_bmbt_irec got;
1377 struct xfs_iext_cursor icur;
1380 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1381 case XFS_DINODE_FMT_LOCAL:
1384 case XFS_DINODE_FMT_BTREE:
1385 case XFS_DINODE_FMT_EXTENTS:
1391 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1392 error = xfs_iread_extents(tp, ip, whichfork);
1397 if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
1403 xfs_bmap_last_extent(
1404 struct xfs_trans *tp,
1405 struct xfs_inode *ip,
1407 struct xfs_bmbt_irec *rec,
1410 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1411 struct xfs_iext_cursor icur;
1414 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1415 error = xfs_iread_extents(tp, ip, whichfork);
1420 xfs_iext_last(ifp, &icur);
1421 if (!xfs_iext_get_extent(ifp, &icur, rec))
1429 * Check the last inode extent to determine whether this allocation will result
1430 * in blocks being allocated at the end of the file. When we allocate new data
1431 * blocks at the end of the file which do not start at the previous data block,
1432 * we will try to align the new blocks at stripe unit boundaries.
1434 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1435 * at, or past the EOF.
1439 struct xfs_bmalloca *bma,
1442 struct xfs_bmbt_irec rec;
1447 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1458 * Check if we are allocating at or past the last extent, or at least into
1459 * the last delayed allocated extent.
1461 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1462 (bma->offset >= rec.br_startoff &&
1463 isnullstartblock(rec.br_startblock));
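/*
 * Example (illustrative): if the last extent is [100, +50), an allocation at
 * offset 150 or beyond sets bma->aeof; so does an allocation at, say, offset
 * 120 when that last extent is still a delayed allocation, since appending
 * writes normally extend the same delalloc extent.
 */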
1468 * Returns the file-relative block number of the first block past eof in
1469 * the file. This is not based on i_size, it is based on the extent records.
1470 * Returns 0 for local files, as they do not have extent records.
1473 xfs_bmap_last_offset(
1474 struct xfs_inode *ip,
1475 xfs_fileoff_t *last_block,
1478 struct xfs_bmbt_irec rec;
1484 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1487 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1488 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1491 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1492 if (error || is_empty)
1495 *last_block = rec.br_startoff + rec.br_blockcount;
1500 * Returns whether the selected fork of the inode has exactly one
1501 * block or not. For the data fork we check this matches di_size,
1502 * implying the file's range is 0..bsize-1.
1504 int /* 1=>1 block, 0=>otherwise */
1506 xfs_inode_t *ip, /* incore inode */
1507 int whichfork) /* data or attr fork */
1509 xfs_ifork_t *ifp; /* inode fork pointer */
1510 int rval; /* return value */
1511 xfs_bmbt_irec_t s; /* internal version of extent */
1512 struct xfs_iext_cursor icur;
1515 if (whichfork == XFS_DATA_FORK)
1516 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1518 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1520 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1522 ifp = XFS_IFORK_PTR(ip, whichfork);
1523 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1524 xfs_iext_first(ifp, &icur);
1525 xfs_iext_get_extent(ifp, &icur, &s);
1526 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1527 if (rval && whichfork == XFS_DATA_FORK)
1528 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1533 * Extent tree manipulation functions used during allocation.
1537 * Convert a delayed allocation to a real allocation.
1539 STATIC int /* error */
1540 xfs_bmap_add_extent_delay_real(
1541 struct xfs_bmalloca *bma,
1544 struct xfs_bmbt_irec *new = &bma->got;
1545 int error; /* error return value */
1546 int i; /* temp state */
1547 xfs_ifork_t *ifp; /* inode fork pointer */
1548 xfs_fileoff_t new_endoff; /* end offset of new entry */
1549 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1550 /* left is 0, right is 1, prev is 2 */
1551 int rval=0; /* return value (logging flags) */
1552 int state = xfs_bmap_fork_to_state(whichfork);
1553 xfs_filblks_t da_new; /* new count del alloc blocks used */
1554 xfs_filblks_t da_old; /* old count del alloc blocks used */
1555 xfs_filblks_t temp=0; /* value for da_new calculations */
1556 int tmp_rval; /* partial logging flags */
1557 struct xfs_mount *mp;
1558 xfs_extnum_t *nextents;
1559 struct xfs_bmbt_irec old;
1561 mp = bma->ip->i_mount;
1562 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1563 ASSERT(whichfork != XFS_ATTR_FORK);
1564 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1565 &bma->ip->i_d.di_nextents);
1567 ASSERT(!isnullstartblock(new->br_startblock));
1569 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1571 XFS_STATS_INC(mp, xs_add_exlist);
1578 * Set up a bunch of variables to make the tests simpler.
1580 xfs_iext_get_extent(ifp, &bma->icur, &PREV);
1581 new_endoff = new->br_startoff + new->br_blockcount;
1582 ASSERT(isnullstartblock(PREV.br_startblock));
1583 ASSERT(PREV.br_startoff <= new->br_startoff);
1584 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1586 da_old = startblockval(PREV.br_startblock);
1590 * Set flags determining what part of the previous delayed allocation
1591 * extent is being replaced by a real allocation.
1593 if (PREV.br_startoff == new->br_startoff)
1594 state |= BMAP_LEFT_FILLING;
1595 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1596 state |= BMAP_RIGHT_FILLING;
1599 * Check and set flags if this segment has a left neighbor.
1600 * Don't set contiguous if the combined extent would be too large.
1602 if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
1603 state |= BMAP_LEFT_VALID;
1604 if (isnullstartblock(LEFT.br_startblock))
1605 state |= BMAP_LEFT_DELAY;
1608 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1609 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1610 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1611 LEFT.br_state == new->br_state &&
1612 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1613 state |= BMAP_LEFT_CONTIG;
1616 * Check and set flags if this segment has a right neighbor.
1617 * Don't set contiguous if the combined extent would be too large.
1618 * Also check for all-three-contiguous being too large.
1620 if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
1621 state |= BMAP_RIGHT_VALID;
1622 if (isnullstartblock(RIGHT.br_startblock))
1623 state |= BMAP_RIGHT_DELAY;
1626 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1627 new_endoff == RIGHT.br_startoff &&
1628 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1629 new->br_state == RIGHT.br_state &&
1630 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1631 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1632 BMAP_RIGHT_FILLING)) !=
1633 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1634 BMAP_RIGHT_FILLING) ||
1635 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1637 state |= BMAP_RIGHT_CONTIG;
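/*
 * Illustrative example of the state bits: if the new real extent exactly
 * fills the delayed extent (LEFT_FILLING | RIGHT_FILLING) and abuts written
 * neighbors on both sides (LEFT_CONTIG | RIGHT_CONTIG), the first case below
 * merges LEFT, PREV and RIGHT into a single incore record and, when a bmbt
 * cursor exists, deletes RIGHT's on-disk record and rewrites LEFT to span
 * all three.
 */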
1641 * Switch out based on the FILLING and CONTIG state bits.
1643 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1644 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1645 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1646 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1648 * Filling in all of a previously delayed allocation extent.
1649 * The left and right neighbors are both contiguous with new.
1651 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
1653 xfs_iext_remove(bma->ip, &bma->icur, state);
1654 xfs_iext_remove(bma->ip, &bma->icur, state);
1655 xfs_iext_prev(ifp, &bma->icur);
1656 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1659 if (bma->cur == NULL)
1660 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1662 rval = XFS_ILOG_CORE;
1663 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1666 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1667 error = xfs_btree_delete(bma->cur, &i);
1670 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1671 error = xfs_btree_decrement(bma->cur, 0, &i);
1674 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1675 error = xfs_bmbt_update(bma->cur, &LEFT);
1681 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1683 * Filling in all of a previously delayed allocation extent.
1684 * The left neighbor is contiguous, the right is not.
1687 LEFT.br_blockcount += PREV.br_blockcount;
1689 xfs_iext_remove(bma->ip, &bma->icur, state);
1690 xfs_iext_prev(ifp, &bma->icur);
1691 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1693 if (bma->cur == NULL)
1694 rval = XFS_ILOG_DEXT;
1697 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1700 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1701 error = xfs_bmbt_update(bma->cur, &LEFT);
1707 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1709 * Filling in all of a previously delayed allocation extent.
1710 * The right neighbor is contiguous, the left is not.
1712 PREV.br_startblock = new->br_startblock;
1713 PREV.br_blockcount += RIGHT.br_blockcount;
1715 xfs_iext_next(ifp, &bma->icur);
1716 xfs_iext_remove(bma->ip, &bma->icur, state);
1717 xfs_iext_prev(ifp, &bma->icur);
1718 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1720 if (bma->cur == NULL)
1721 rval = XFS_ILOG_DEXT;
1724 error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
1727 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1728 error = xfs_bmbt_update(bma->cur, &PREV);
1734 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1736 * Filling in all of a previously delayed allocation extent.
1737 * Neither the left nor right neighbors are contiguous with
1740 PREV.br_startblock = new->br_startblock;
1741 PREV.br_state = new->br_state;
1742 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1745 if (bma->cur == NULL)
1746 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1748 rval = XFS_ILOG_CORE;
1749 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1752 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1753 error = xfs_btree_insert(bma->cur, &i);
1756 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1760 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1762 * Filling in the first part of a previous delayed allocation.
1763 * The left neighbor is contiguous.
1766 temp = PREV.br_blockcount - new->br_blockcount;
1767 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1768 startblockval(PREV.br_startblock));
1770 LEFT.br_blockcount += new->br_blockcount;
1772 PREV.br_blockcount = temp;
1773 PREV.br_startoff += new->br_blockcount;
1774 PREV.br_startblock = nullstartblock(da_new);
1776 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1777 xfs_iext_prev(ifp, &bma->icur);
1778 xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
1780 if (bma->cur == NULL)
1781 rval = XFS_ILOG_DEXT;
1784 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1787 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1788 error = xfs_bmbt_update(bma->cur, &LEFT);
1794 case BMAP_LEFT_FILLING:
1796 * Filling in the first part of a previous delayed allocation.
1797 * The left neighbor is not contiguous.
1799 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1801 if (bma->cur == NULL)
1802 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1804 rval = XFS_ILOG_CORE;
1805 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1808 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1809 error = xfs_btree_insert(bma->cur, &i);
1812 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1815 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1816 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1817 bma->firstblock, bma->dfops,
1818 &bma->cur, 1, &tmp_rval, whichfork);
1824 temp = PREV.br_blockcount - new->br_blockcount;
1825 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1826 startblockval(PREV.br_startblock) -
1827 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1829 PREV.br_startoff = new_endoff;
1830 PREV.br_blockcount = temp;
1831 PREV.br_startblock = nullstartblock(da_new);
1832 xfs_iext_next(ifp, &bma->icur);
1833 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1834 xfs_iext_prev(ifp, &bma->icur);
1837 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1839 * Filling in the last part of a previous delayed allocation.
1840 * The right neighbor is contiguous with the new allocation.
1843 RIGHT.br_startoff = new->br_startoff;
1844 RIGHT.br_startblock = new->br_startblock;
1845 RIGHT.br_blockcount += new->br_blockcount;
1847 if (bma->cur == NULL)
1848 rval = XFS_ILOG_DEXT;
1851 error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
1854 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1855 error = xfs_bmbt_update(bma->cur, &RIGHT);
1860 temp = PREV.br_blockcount - new->br_blockcount;
1861 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1862 startblockval(PREV.br_startblock));
1864 PREV.br_blockcount = temp;
1865 PREV.br_startblock = nullstartblock(da_new);
1867 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1868 xfs_iext_next(ifp, &bma->icur);
1869 xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
1872 case BMAP_RIGHT_FILLING:
1874 * Filling in the last part of a previous delayed allocation.
1875 * The right neighbor is not contiguous.
1877 xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
1879 if (bma->cur == NULL)
1880 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1882 rval = XFS_ILOG_CORE;
1883 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1886 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1887 error = xfs_btree_insert(bma->cur, &i);
1890 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1893 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1894 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1895 bma->firstblock, bma->dfops, &bma->cur, 1,
1896 &tmp_rval, whichfork);
1902 temp = PREV.br_blockcount - new->br_blockcount;
1903 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1904 startblockval(PREV.br_startblock) -
1905 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1907 PREV.br_startblock = nullstartblock(da_new);
1908 PREV.br_blockcount = temp;
1909 xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
1910 xfs_iext_next(ifp, &bma->icur);
1915 * Filling in the middle part of a previous delayed allocation.
1916 * Contiguity is impossible here.
1917 * This case is avoided almost all the time.
1919 * We start with a delayed allocation:
1921 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
1924 * and we are allocating:
1925 * +rrrrrrrrrrrrrrrrr+
1928 * and we set it up for insertion as:
1929 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
1931 * PREV @ idx LEFT RIGHT
1932 * inserted at idx + 1
1936 /* LEFT is the new middle */
1939 /* RIGHT is the new right */
1940 RIGHT.br_state = PREV.br_state;
1941 RIGHT.br_startoff = new_endoff;
1942 RIGHT.br_blockcount =
1943 PREV.br_startoff + PREV.br_blockcount - new_endoff;
1944 RIGHT.br_startblock =
1945 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1946 RIGHT.br_blockcount));
1949 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
1950 PREV.br_startblock =
1951 nullstartblock(xfs_bmap_worst_indlen(bma->ip,
1952 PREV.br_blockcount));
1953 xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
1955 xfs_iext_next(ifp, &bma->icur);
1956 xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
1957 xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
1960 if (bma->cur == NULL)
1961 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1963 rval = XFS_ILOG_CORE;
1964 error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
1967 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1968 error = xfs_btree_insert(bma->cur, &i);
1971 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1974 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1975 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1976 bma->firstblock, bma->dfops, &bma->cur,
1977 1, &tmp_rval, whichfork);
1983 da_new = startblockval(PREV.br_startblock) +
1984 startblockval(RIGHT.br_startblock);
1987 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1988 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1989 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1990 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1991 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1992 case BMAP_LEFT_CONTIG:
1993 case BMAP_RIGHT_CONTIG:
1995 * These cases are all impossible.
2000 /* add reverse mapping */
2001 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2005 /* convert to a btree if necessary */
2006 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2007 int tmp_logflags; /* partial log flag return val */
2009 ASSERT(bma->cur == NULL);
2010 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2011 bma->firstblock, bma->dfops, &bma->cur,
2012 da_old > 0, &tmp_logflags, whichfork);
2013 bma->logflags |= tmp_logflags;
2019 da_new += bma->cur->bc_private.b.allocated;
2020 bma->cur->bc_private.b.allocated = 0;
2023 /* adjust for changes in reserved delayed indirect blocks */
2024 if (da_new != da_old) {
2025 ASSERT(state == 0 || da_new < da_old);
2026 error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
2030 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2032 if (whichfork != XFS_COW_FORK)
2033 bma->logflags |= rval;
2041 * Convert an unwritten allocation to a real allocation or vice versa.
2043 STATIC int /* error */
2044 xfs_bmap_add_extent_unwritten_real(
2045 struct xfs_trans *tp,
2046 xfs_inode_t *ip, /* incore inode pointer */
2048 struct xfs_iext_cursor *icur,
2049 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2050 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2051 xfs_fsblock_t *first, /* pointer to firstblock variable */
2052 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2053 int *logflagsp) /* inode logging flags */
2055 xfs_btree_cur_t *cur; /* btree cursor */
2056 int error; /* error return value */
2057 int i; /* temp state */
2058 xfs_ifork_t *ifp; /* inode fork pointer */
2059 xfs_fileoff_t new_endoff; /* end offset of new entry */
2060 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2061 /* left is 0, right is 1, prev is 2 */
2062 int rval=0; /* return value (logging flags) */
2063 int state = xfs_bmap_fork_to_state(whichfork);
2064 struct xfs_mount *mp = ip->i_mount;
2065 struct xfs_bmbt_irec old;
2070 ifp = XFS_IFORK_PTR(ip, whichfork);
2072 ASSERT(!isnullstartblock(new->br_startblock));
2074 XFS_STATS_INC(mp, xs_add_exlist);
2081 * Set up a bunch of variables to make the tests simpler.
2084 xfs_iext_get_extent(ifp, icur, &PREV);
2085 ASSERT(new->br_state != PREV.br_state);
2086 new_endoff = new->br_startoff + new->br_blockcount;
2087 ASSERT(PREV.br_startoff <= new->br_startoff);
2088 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2091 * Set flags determining what part of the previous oldext allocation
2092 * extent is being replaced by a newext allocation.
2094 if (PREV.br_startoff == new->br_startoff)
2095 state |= BMAP_LEFT_FILLING;
2096 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2097 state |= BMAP_RIGHT_FILLING;
2100 * Check and set flags if this segment has a left neighbor.
2101 * Don't set contiguous if the combined extent would be too large.
2103 if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
2104 state |= BMAP_LEFT_VALID;
2105 if (isnullstartblock(LEFT.br_startblock))
2106 state |= BMAP_LEFT_DELAY;
2109 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2110 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2111 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2112 LEFT.br_state == new->br_state &&
2113 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2114 state |= BMAP_LEFT_CONTIG;
2117 * Check and set flags if this segment has a right neighbor.
2118 * Don't set contiguous if the combined extent would be too large.
2119 * Also check for all-three-contiguous being too large.
2121 if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
2122 state |= BMAP_RIGHT_VALID;
2123 if (isnullstartblock(RIGHT.br_startblock))
2124 state |= BMAP_RIGHT_DELAY;
2127 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2128 new_endoff == RIGHT.br_startoff &&
2129 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2130 new->br_state == RIGHT.br_state &&
2131 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2132 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2133 BMAP_RIGHT_FILLING)) !=
2134 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2135 BMAP_RIGHT_FILLING) ||
2136 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2138 state |= BMAP_RIGHT_CONTIG;
2141 * Switch out based on the FILLING and CONTIG state bits.
2143 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2144 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2145 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2146 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2148 * Setting all of a previous oldext extent to newext.
2149 * The left and right neighbors are both contiguous with new.
2151 LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;
2153 xfs_iext_remove(ip, icur, state);
2154 xfs_iext_remove(ip, icur, state);
2155 xfs_iext_prev(ifp, icur);
2156 xfs_iext_update_extent(ip, state, icur, &LEFT);
2157 XFS_IFORK_NEXT_SET(ip, whichfork,
2158 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2160 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2162 rval = XFS_ILOG_CORE;
2163 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2166 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2167 if ((error = xfs_btree_delete(cur, &i)))
2169 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2170 if ((error = xfs_btree_decrement(cur, 0, &i)))
2172 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2173 if ((error = xfs_btree_delete(cur, &i)))
2175 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2176 if ((error = xfs_btree_decrement(cur, 0, &i)))
2178 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2179 error = xfs_bmbt_update(cur, &LEFT);
2185 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2187 * Setting all of a previous oldext extent to newext.
2188 * The left neighbor is contiguous, the right is not.
2190 LEFT.br_blockcount += PREV.br_blockcount;
2192 xfs_iext_remove(ip, icur, state);
2193 xfs_iext_prev(ifp, icur);
2194 xfs_iext_update_extent(ip, state, icur, &LEFT);
2195 XFS_IFORK_NEXT_SET(ip, whichfork,
2196 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2198 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2200 rval = XFS_ILOG_CORE;
2201 error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
2204 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2205 if ((error = xfs_btree_delete(cur, &i)))
2207 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2208 if ((error = xfs_btree_decrement(cur, 0, &i)))
2210 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2211 error = xfs_bmbt_update(cur, &LEFT);
2217 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2219 * Setting all of a previous oldext extent to newext.
2220 * The right neighbor is contiguous, the left is not.
2222 PREV.br_blockcount += RIGHT.br_blockcount;
2223 PREV.br_state = new->br_state;
2225 xfs_iext_next(ifp, icur);
2226 xfs_iext_remove(ip, icur, state);
2227 xfs_iext_prev(ifp, icur);
2228 xfs_iext_update_extent(ip, state, icur, &PREV);
2230 XFS_IFORK_NEXT_SET(ip, whichfork,
2231 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2233 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2235 rval = XFS_ILOG_CORE;
2236 error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
2239 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2240 if ((error = xfs_btree_delete(cur, &i)))
2242 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2243 if ((error = xfs_btree_decrement(cur, 0, &i)))
2245 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2246 error = xfs_bmbt_update(cur, &PREV);
2252 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2254 * Setting all of a previous oldext extent to newext.
2255 * Neither the left nor right neighbors are contiguous with the new extent.
2258 PREV.br_state = new->br_state;
2259 xfs_iext_update_extent(ip, state, icur, &PREV);
2262 rval = XFS_ILOG_DEXT;
2265 error = xfs_bmbt_lookup_eq(cur, new, &i);
2268 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2269 error = xfs_bmbt_update(cur, &PREV);
2275 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2277 * Setting the first part of a previous oldext extent to newext.
2278 * The left neighbor is contiguous.
2280 LEFT.br_blockcount += new->br_blockcount;
2283 PREV.br_startoff += new->br_blockcount;
2284 PREV.br_startblock += new->br_blockcount;
2285 PREV.br_blockcount -= new->br_blockcount;
2287 xfs_iext_update_extent(ip, state, icur, &PREV);
2288 xfs_iext_prev(ifp, icur);
2289 xfs_iext_update_extent(ip, state, icur, &LEFT);
2292 rval = XFS_ILOG_DEXT;
2295 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2298 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2299 error = xfs_bmbt_update(cur, &PREV);
2302 error = xfs_btree_decrement(cur, 0, &i);
2305 error = xfs_bmbt_update(cur, &LEFT);
2311 case BMAP_LEFT_FILLING:
2313 * Setting the first part of a previous oldext extent to newext.
2314 * The left neighbor is not contiguous.
2317 PREV.br_startoff += new->br_blockcount;
2318 PREV.br_startblock += new->br_blockcount;
2319 PREV.br_blockcount -= new->br_blockcount;
2321 xfs_iext_update_extent(ip, state, icur, &PREV);
2322 xfs_iext_insert(ip, icur, new, state);
2323 XFS_IFORK_NEXT_SET(ip, whichfork,
2324 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2326 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2328 rval = XFS_ILOG_CORE;
2329 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2332 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2333 error = xfs_bmbt_update(cur, &PREV);
2336 cur->bc_rec.b = *new;
2337 if ((error = xfs_btree_insert(cur, &i)))
2339 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2343 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2345 * Setting the last part of a previous oldext extent to newext.
2346 * The right neighbor is contiguous with the new allocation.
2349 PREV.br_blockcount -= new->br_blockcount;
2351 RIGHT.br_startoff = new->br_startoff;
2352 RIGHT.br_startblock = new->br_startblock;
2353 RIGHT.br_blockcount += new->br_blockcount;
2355 xfs_iext_update_extent(ip, state, icur, &PREV);
2356 xfs_iext_next(ifp, icur);
2357 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2360 rval = XFS_ILOG_DEXT;
2363 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2366 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2367 error = xfs_bmbt_update(cur, &PREV);
2370 error = xfs_btree_increment(cur, 0, &i);
2373 error = xfs_bmbt_update(cur, &RIGHT);
2379 case BMAP_RIGHT_FILLING:
2381 * Setting the last part of a previous oldext extent to newext.
2382 * The right neighbor is not contiguous.
2385 PREV.br_blockcount -= new->br_blockcount;
2387 xfs_iext_update_extent(ip, state, icur, &PREV);
2388 xfs_iext_next(ifp, icur);
2389 xfs_iext_insert(ip, icur, new, state);
2391 XFS_IFORK_NEXT_SET(ip, whichfork,
2392 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2394 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2396 rval = XFS_ILOG_CORE;
2397 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2400 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2401 error = xfs_bmbt_update(cur, &PREV);
2404 error = xfs_bmbt_lookup_eq(cur, new, &i);
2407 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2408 if ((error = xfs_btree_insert(cur, &i)))
2410 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2416 * Setting the middle part of a previous oldext extent to
2417 * newext. Contiguity is impossible here.
2418 * One extent becomes three extents.
2421 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2424 r[1].br_startoff = new_endoff;
2425 r[1].br_blockcount =
2426 old.br_startoff + old.br_blockcount - new_endoff;
2427 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2428 r[1].br_state = PREV.br_state;
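/*
 * Worked example (illustrative numbers only): with PREV = [0,100) in
 * the old state and new = [40,60), the code above trims PREV to the
 * left remainder [0,40), sets up r[1] as the right remainder [60,100)
 * in the old state, and new itself becomes the middle extent [40,60)
 * in the new state, so one record is replaced by three.
 */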
2430 xfs_iext_update_extent(ip, state, icur, &PREV);
2431 xfs_iext_next(ifp, icur);
2432 xfs_iext_insert(ip, icur, &r[1], state);
2433 xfs_iext_insert(ip, icur, &r[0], state);
2435 XFS_IFORK_NEXT_SET(ip, whichfork,
2436 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2438 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2440 rval = XFS_ILOG_CORE;
2441 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2444 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2445 /* new right extent - oldext */
2446 error = xfs_bmbt_update(cur, &r[1]);
2449 /* new left extent - oldext */
2450 cur->bc_rec.b = PREV;
2451 if ((error = xfs_btree_insert(cur, &i)))
2453 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2455 * Reset the cursor to the position of the new extent
2456 * we are about to insert as we can't trust it after
2457 * the previous insert.
2459 error = xfs_bmbt_lookup_eq(cur, new, &i);
2462 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2463 /* new middle extent - newext */
2464 if ((error = xfs_btree_insert(cur, &i)))
2466 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2470 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2471 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2472 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2473 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2474 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2475 case BMAP_LEFT_CONTIG:
2476 case BMAP_RIGHT_CONTIG:
2478 * These cases are all impossible.
2483 /* update reverse mappings */
2484 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2488 /* convert to a btree if necessary */
2489 if (xfs_bmap_needs_btree(ip, whichfork)) {
2490 int tmp_logflags; /* partial log flag return val */
2492 ASSERT(cur == NULL);
2493 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2494 0, &tmp_logflags, whichfork);
2495 *logflagsp |= tmp_logflags;
2500 /* clear out the allocated field, done with it now in any case. */
2502 cur->bc_private.b.allocated = 0;
2506 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2516 * Convert a hole to a delayed allocation.
2519 xfs_bmap_add_extent_hole_delay(
2520 xfs_inode_t *ip, /* incore inode pointer */
2522 struct xfs_iext_cursor *icur,
2523 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2525 xfs_ifork_t *ifp; /* inode fork pointer */
2526 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2527 xfs_filblks_t newlen=0; /* new indirect size */
2528 xfs_filblks_t oldlen=0; /* old indirect size */
2529 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2530 int state = xfs_bmap_fork_to_state(whichfork);
2531 xfs_filblks_t temp; /* temp for indirect calculations */
2533 ifp = XFS_IFORK_PTR(ip, whichfork);
2534 ASSERT(isnullstartblock(new->br_startblock));
2537 * Check and set flags if this segment has a left neighbor
2539 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2540 state |= BMAP_LEFT_VALID;
2541 if (isnullstartblock(left.br_startblock))
2542 state |= BMAP_LEFT_DELAY;
2546 * Check and set flags if the current (right) segment exists.
2547 * If it doesn't exist, we're converting the hole at end-of-file.
2549 if (xfs_iext_get_extent(ifp, icur, &right)) {
2550 state |= BMAP_RIGHT_VALID;
2551 if (isnullstartblock(right.br_startblock))
2552 state |= BMAP_RIGHT_DELAY;
2556 * Set contiguity flags on the left and right neighbors.
2557 * Don't let extents get too large, even if the pieces are contiguous.
2559 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2560 left.br_startoff + left.br_blockcount == new->br_startoff &&
2561 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2562 state |= BMAP_LEFT_CONTIG;
2564 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2565 new->br_startoff + new->br_blockcount == right.br_startoff &&
2566 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2567 (!(state & BMAP_LEFT_CONTIG) ||
2568 (left.br_blockcount + new->br_blockcount +
2569 right.br_blockcount <= MAXEXTLEN)))
2570 state |= BMAP_RIGHT_CONTIG;
2573 * Switch out based on the contiguity flags.
2575 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2576 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2578 * New allocation is contiguous with delayed allocations
2579 * on the left and on the right.
2580 * Merge all three into a single extent record.
2582 temp = left.br_blockcount + new->br_blockcount +
2583 right.br_blockcount;
2585 oldlen = startblockval(left.br_startblock) +
2586 startblockval(new->br_startblock) +
2587 startblockval(right.br_startblock);
2588 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen);
2590 left.br_startblock = nullstartblock(newlen);
2591 left.br_blockcount = temp;
2593 xfs_iext_remove(ip, icur, state);
2594 xfs_iext_prev(ifp, icur);
2595 xfs_iext_update_extent(ip, state, icur, &left);
2598 case BMAP_LEFT_CONTIG:
2600 * New allocation is contiguous with a delayed allocation on the left.
2602 * Merge the new allocation with the left neighbor.
2604 temp = left.br_blockcount + new->br_blockcount;
2606 oldlen = startblockval(left.br_startblock) +
2607 startblockval(new->br_startblock);
2608 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen);
2610 left.br_blockcount = temp;
2611 left.br_startblock = nullstartblock(newlen);
2613 xfs_iext_prev(ifp, icur);
2614 xfs_iext_update_extent(ip, state, icur, &left);
2617 case BMAP_RIGHT_CONTIG:
2619 * New allocation is contiguous with a delayed allocation on the right.
2621 * Merge the new allocation with the right neighbor.
2623 temp = new->br_blockcount + right.br_blockcount;
2624 oldlen = startblockval(new->br_startblock) +
2625 startblockval(right.br_startblock);
2626 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), oldlen);
2628 right.br_startoff = new->br_startoff;
2629 right.br_startblock = nullstartblock(newlen);
2630 right.br_blockcount = temp;
2631 xfs_iext_update_extent(ip, state, icur, &right);
2636 * New allocation is not contiguous with another
2637 * delayed allocation.
2638 * Insert a new entry.
2640 oldlen = newlen = 0;
2641 xfs_iext_insert(ip, icur, new, state);
2644 if (oldlen != newlen) {
2645 ASSERT(oldlen > newlen);
2646 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2649 * Nothing to do for disk quota accounting here.
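/*
 * Example of the give-back (hypothetical numbers): if the left, new and
 * right delalloc pieces each carried a worst-case indlen reservation of
 * 4 blocks (oldlen = 12) but the merged extent only needs 7, then
 * newlen = 7 and the surplus 5 blocks are handed back to the free block
 * counter above.
 */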
2655 * Convert a hole to a real allocation.
2657 STATIC int /* error */
2658 xfs_bmap_add_extent_hole_real(
2659 struct xfs_trans *tp,
2660 struct xfs_inode *ip,
2662 struct xfs_iext_cursor *icur,
2663 struct xfs_btree_cur **curp,
2664 struct xfs_bmbt_irec *new,
2665 xfs_fsblock_t *first,
2666 struct xfs_defer_ops *dfops,
2669 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2670 struct xfs_mount *mp = ip->i_mount;
2671 struct xfs_btree_cur *cur = *curp;
2672 int error; /* error return value */
2673 int i; /* temp state */
2674 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2675 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2676 int rval=0; /* return value (logging flags) */
2677 int state = xfs_bmap_fork_to_state(whichfork);
2678 struct xfs_bmbt_irec old;
2680 ASSERT(!isnullstartblock(new->br_startblock));
2681 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2683 XFS_STATS_INC(mp, xs_add_exlist);
2686 * Check and set flags if this segment has a left neighbor.
2688 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2689 state |= BMAP_LEFT_VALID;
2690 if (isnullstartblock(left.br_startblock))
2691 state |= BMAP_LEFT_DELAY;
2695 * Check and set flags if this segment has a current value.
2696 * Not true if we're inserting into the "hole" at eof.
2698 if (xfs_iext_get_extent(ifp, icur, &right)) {
2699 state |= BMAP_RIGHT_VALID;
2700 if (isnullstartblock(right.br_startblock))
2701 state |= BMAP_RIGHT_DELAY;
2705 * We're inserting a real allocation between "left" and "right".
2706 * Set the contiguity flags. Don't let extents get too large.
2708 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2709 left.br_startoff + left.br_blockcount == new->br_startoff &&
2710 left.br_startblock + left.br_blockcount == new->br_startblock &&
2711 left.br_state == new->br_state &&
2712 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2713 state |= BMAP_LEFT_CONTIG;
2715 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2716 new->br_startoff + new->br_blockcount == right.br_startoff &&
2717 new->br_startblock + new->br_blockcount == right.br_startblock &&
2718 new->br_state == right.br_state &&
2719 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2720 (!(state & BMAP_LEFT_CONTIG) ||
2721 left.br_blockcount + new->br_blockcount +
2722 right.br_blockcount <= MAXEXTLEN))
2723 state |= BMAP_RIGHT_CONTIG;
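/*
 * Editorial note (hypothetical numbers): unlike the delalloc case,
 * merging real extents also requires disk adjacency and a matching
 * br_state. E.g. left = file blocks [0,8) at disk block 100 and
 * new = [8,12) at disk block 108 can merge; the same new extent placed
 * at disk block 200 could not, nor could an unwritten extent next to a
 * written one.
 */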
2727 * Select which case we're in here, and implement it.
2729 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2730 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2732 * New allocation is contiguous with real allocations on the
2733 * left and on the right.
2734 * Merge all three into a single extent record.
2736 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2738 xfs_iext_remove(ip, icur, state);
2739 xfs_iext_prev(ifp, icur);
2740 xfs_iext_update_extent(ip, state, icur, &left);
2742 XFS_IFORK_NEXT_SET(ip, whichfork,
2743 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2745 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2747 rval = XFS_ILOG_CORE;
2748 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2751 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2752 error = xfs_btree_delete(cur, &i);
2755 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2756 error = xfs_btree_decrement(cur, 0, &i);
2759 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2760 error = xfs_bmbt_update(cur, &left);
2766 case BMAP_LEFT_CONTIG:
2768 * New allocation is contiguous with a real allocation on the left.
2770 * Merge the new allocation with the left neighbor.
2773 left.br_blockcount += new->br_blockcount;
2775 xfs_iext_prev(ifp, icur);
2776 xfs_iext_update_extent(ip, state, icur, &left);
2779 rval = xfs_ilog_fext(whichfork);
2782 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2785 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2786 error = xfs_bmbt_update(cur, &left);
2792 case BMAP_RIGHT_CONTIG:
2794 * New allocation is contiguous with a real allocation on the right.
2796 * Merge the new allocation with the right neighbor.
2800 right.br_startoff = new->br_startoff;
2801 right.br_startblock = new->br_startblock;
2802 right.br_blockcount += new->br_blockcount;
2803 xfs_iext_update_extent(ip, state, icur, &right);
2806 rval = xfs_ilog_fext(whichfork);
2809 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2812 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2813 error = xfs_bmbt_update(cur, &right);
2821 * New allocation is not contiguous with another real allocation.
2823 * Insert a new entry.
2825 xfs_iext_insert(ip, icur, new, state);
2826 XFS_IFORK_NEXT_SET(ip, whichfork,
2827 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2829 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2831 rval = XFS_ILOG_CORE;
2832 error = xfs_bmbt_lookup_eq(cur, new, &i);
2835 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2836 error = xfs_btree_insert(cur, &i);
2839 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2844 /* add reverse mapping */
2845 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
2849 /* convert to a btree if necessary */
2850 if (xfs_bmap_needs_btree(ip, whichfork)) {
2851 int tmp_logflags; /* partial log flag return val */
2853 ASSERT(cur == NULL);
2854 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
2855 0, &tmp_logflags, whichfork);
2856 *logflagsp |= tmp_logflags;
2862 /* clear out the allocated field, done with it now in any case. */
2864 cur->bc_private.b.allocated = 0;
2866 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2873 * Functions used in the extent read, allocate and remove paths
2877 * Adjust the size of the new extent based on di_extsize and rt extsize.
2880 xfs_bmap_extsize_align(
2882 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2883 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2884 xfs_extlen_t extsz, /* align to this extent size */
2885 int rt, /* is this a realtime inode? */
2886 int eof, /* is extent at end-of-file? */
2887 int delay, /* creating delalloc extent? */
2888 int convert, /* overwriting unwritten extent? */
2889 xfs_fileoff_t *offp, /* in/out: aligned offset */
2890 xfs_extlen_t *lenp) /* in/out: aligned length */
2892 xfs_fileoff_t orig_off; /* original offset */
2893 xfs_extlen_t orig_alen; /* original length */
2894 xfs_fileoff_t orig_end; /* original off+len */
2895 xfs_fileoff_t nexto; /* next file offset */
2896 xfs_fileoff_t prevo; /* previous file offset */
2897 xfs_fileoff_t align_off; /* temp for offset */
2898 xfs_extlen_t align_alen; /* temp for length */
2899 xfs_extlen_t temp; /* temp for calculations */
2904 orig_off = align_off = *offp;
2905 orig_alen = align_alen = *lenp;
2906 orig_end = orig_off + orig_alen;
2909 * If this request overlaps an existing extent, then don't
2910 * attempt to perform any additional alignment.
2912 if (!delay && !eof &&
2913 (orig_off >= gotp->br_startoff) &&
2914 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2919 * If the file offset is unaligned vs. the extent size
2920 * we need to align it. This will be possible unless
2921 * the file was previously written with a kernel that didn't
2922 * perform this alignment, or if a truncate shot us in the foot.
2925 temp = do_mod(orig_off, extsz);
2931 /* Same adjustment for the end of the requested area. */
2932 temp = (align_alen % extsz);
2933 if (temp)
2934 align_alen += extsz - temp;
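/*
 * Alignment example (illustrative values): with extsz = 16 and a
 * request for offset 5, length 10, the start is pulled back to offset 0
 * (growing the length to 15) and the end is then rounded up so that
 * align_alen becomes 16, i.e. the request grows to one full extent-size
 * chunk covering the original range.
 */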
2937 * For large extent hint sizes, the aligned extent might be larger than
2938 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2939 * the length back under MAXEXTLEN. The outer allocation loops handle
2940 * short allocation just fine, so it is safe to do this. We only want to
2941 * do it when we are forced to, though, because it means more allocation
2942 * operations are required.
2944 while (align_alen > MAXEXTLEN)
2945 align_alen -= extsz;
2946 ASSERT(align_alen <= MAXEXTLEN);
2949 * If the previous block overlaps with this proposed allocation
2950 * then move the start forward without adjusting the length.
2952 if (prevp->br_startoff != NULLFILEOFF) {
2953 if (prevp->br_startblock == HOLESTARTBLOCK)
2954 prevo = prevp->br_startoff;
2956 prevo = prevp->br_startoff + prevp->br_blockcount;
2959 if (align_off != orig_off && align_off < prevo)
2960 align_off = prevo;
2962 * If the next block overlaps with this proposed allocation
2963 * then move the start back without adjusting the length,
2964 * but not before offset 0.
2965 * This may of course make the start overlap previous block,
2966 * and if we hit the offset 0 limit then the next block
2967 * can still overlap too.
2969 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2970 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2971 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2972 nexto = gotp->br_startoff + gotp->br_blockcount;
2974 nexto = gotp->br_startoff;
2976 nexto = NULLFILEOFF;
2977 if (!eof &&
2978 align_off + align_alen != orig_end &&
2979 align_off + align_alen > nexto)
2980 align_off = nexto > align_alen ? nexto - align_alen : 0;
2982 * If we're now overlapping the next or previous extent that
2983 * means we can't fit an extsz piece in this hole. Just move
2984 * the start forward to the first valid spot and set
2985 * the length so we hit the end.
2987 if (align_off != orig_off && align_off < prevo)
2988 align_off = prevo;
2989 if (align_off + align_alen != orig_end &&
2990 align_off + align_alen > nexto &&
2991 nexto != NULLFILEOFF) {
2992 ASSERT(nexto > prevo);
2993 align_alen = nexto - align_off;
2997 * If realtime, and the result isn't a multiple of the realtime
2998 * extent size we need to remove blocks until it is.
3000 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3002 * We're not covering the original request, or
3003 * we won't be able to once we fix the length.
3005 if (orig_off < align_off ||
3006 orig_end > align_off + align_alen ||
3007 align_alen - temp < orig_alen)
3008 return -EINVAL;
3010 * Try to fix it by moving the start up.
3012 if (align_off + temp <= orig_off) {
3017 * Try to fix it by moving the end in.
3019 else if (align_off + align_alen - temp >= orig_end)
3022 * Set the start to the minimum then trim the length.
3025 align_alen -= orig_off - align_off;
3026 align_off = orig_off;
3027 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3030 * Result doesn't cover the request, fail it.
3032 if (orig_off < align_off || orig_end > align_off + align_alen)
3035 ASSERT(orig_off >= align_off);
3036 /* see MAXEXTLEN handling above */
3037 ASSERT(orig_end <= align_off + align_alen ||
3038 align_alen + extsz > MAXEXTLEN);
3042 if (!eof && gotp->br_startoff != NULLFILEOFF)
3043 ASSERT(align_off + align_alen <= gotp->br_startoff);
3044 if (prevp->br_startoff != NULLFILEOFF)
3045 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3053 #define XFS_ALLOC_GAP_UNITS 4
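/*
 * Editorial note: XFS_ALLOC_GAP_UNITS bounds how far past a
 * neighbouring extent we are willing to aim. With the value 4 and a
 * hypothetical 10-block allocation, a gap of up to 40 file blocks after
 * the previous extent still lets the target disk block track the gap
 * (previous extent's disk end plus the gap); a larger gap just targets
 * the block right after the previous extent.
 */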
3057 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3059 xfs_fsblock_t adjust; /* adjustment to block numbers */
3060 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3061 xfs_mount_t *mp; /* mount point structure */
3062 int nullfb; /* true if ap->firstblock isn't set */
3063 int rt; /* true if inode is realtime */
3065 #define ISVALID(x,y) \
3066 (rt ? \
3067 (x) < mp->m_sb.sb_rblocks : \
3068 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3069 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3070 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3072 mp = ap->ip->i_mount;
3073 nullfb = *ap->firstblock == NULLFSBLOCK;
3074 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3075 xfs_alloc_is_userdata(ap->datatype);
3076 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3078 * If allocating at eof, and there's a previous real block,
3079 * try to use its last block as our starting point.
3081 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3082 !isnullstartblock(ap->prev.br_startblock) &&
3083 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3084 ap->prev.br_startblock)) {
3085 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3087 * Adjust for the gap between prevp and us.
3089 adjust = ap->offset -
3090 (ap->prev.br_startoff + ap->prev.br_blockcount);
3091 if (adjust &&
3092 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3093 ap->blkno += adjust;
3096 * If not at eof, then compare the two neighbor blocks.
3097 * Figure out whether either one gives us a good starting point,
3098 * and pick the better one.
3100 else if (!ap->eof) {
3101 xfs_fsblock_t gotbno; /* right side block number */
3102 xfs_fsblock_t gotdiff=0; /* right side difference */
3103 xfs_fsblock_t prevbno; /* left side block number */
3104 xfs_fsblock_t prevdiff=0; /* left side difference */
3107 * If there's a previous (left) block, select a requested
3108 * start block based on it.
3110 if (ap->prev.br_startoff != NULLFILEOFF &&
3111 !isnullstartblock(ap->prev.br_startblock) &&
3112 (prevbno = ap->prev.br_startblock +
3113 ap->prev.br_blockcount) &&
3114 ISVALID(prevbno, ap->prev.br_startblock)) {
3116 * Calculate gap to end of previous block.
3118 adjust = prevdiff = ap->offset -
3119 (ap->prev.br_startoff +
3120 ap->prev.br_blockcount);
3122 * Figure the startblock based on the previous block's
3123 * end and the gap size.
3125 * If the gap is large relative to the piece we're
3126 * allocating, or using it gives us an invalid block
3127 * number, then just use the end of the previous block.
3129 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3130 ISVALID(prevbno + prevdiff,
3131 ap->prev.br_startblock))
3136 * If the firstblock forbids it, can't use it, must use default.
3139 if (!rt && !nullfb &&
3140 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3141 prevbno = NULLFSBLOCK;
3144 * No previous block or can't follow it, just default.
3147 prevbno = NULLFSBLOCK;
3149 * If there's a following (right) block, select a requested
3150 * start block based on it.
3152 if (!isnullstartblock(ap->got.br_startblock)) {
3154 * Calculate gap to start of next block.
3156 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3158 * Figure the startblock based on the next block's
3159 * start and the gap size.
3161 gotbno = ap->got.br_startblock;
3164 * If the gap is large relative to the piece we're
3165 * allocating, or using it gives us an invalid block
3166 * number, then just use the start of the next block
3167 * offset by our length.
3169 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3170 ISVALID(gotbno - gotdiff, gotbno))
3172 else if (ISVALID(gotbno - ap->length, gotbno)) {
3173 gotbno -= ap->length;
3174 gotdiff += adjust - ap->length;
3178 * If the firstblock forbids it, can't use it, must use default.
3181 if (!rt && !nullfb &&
3182 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3183 gotbno = NULLFSBLOCK;
3186 * No next block, just default.
3189 gotbno = NULLFSBLOCK;
3191 * If both valid, pick the better one, else the only good
3192 * one, else ap->blkno is already set (to 0 or the inode block).
3194 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3195 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3196 else if (prevbno != NULLFSBLOCK)
3197 ap->blkno = prevbno;
3198 else if (gotbno != NULLFSBLOCK)
3205 xfs_bmap_longest_free_extent(
3206 struct xfs_trans *tp,
3211 struct xfs_mount *mp = tp->t_mountp;
3212 struct xfs_perag *pag;
3213 xfs_extlen_t longest;
3216 pag = xfs_perag_get(mp, ag);
3217 if (!pag->pagf_init) {
3218 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3222 if (!pag->pagf_init) {
3228 longest = xfs_alloc_longest_free_extent(pag,
3229 xfs_alloc_min_freelist(mp, pag),
3230 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3231 if (*blen < longest)
3240 xfs_bmap_select_minlen(
3241 struct xfs_bmalloca *ap,
3242 struct xfs_alloc_arg *args,
3246 if (notinit || *blen < ap->minlen) {
3248 * Since we did a BUF_TRYLOCK above, it is possible that
3249 * there is space for this request.
3251 args->minlen = ap->minlen;
3252 } else if (*blen < args->maxlen) {
3254 * If the best seen length is less than the request length,
3255 * use the best as the minimum.
3257 args->minlen = *blen;
3260 * Otherwise we've seen an extent as big as maxlen, use that as the minimum.
3263 args->minlen = args->maxlen;
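/*
 * Example (hypothetical values): with args->maxlen = 64 and
 * ap->minlen = 1, a best free extent of 40 blocks seen during the AG
 * scan gives args->minlen = 40; an AG with a 64+ block extent lets us
 * ask for the full 64; and an unreadable AGF (notinit) falls back to
 * ap->minlen.
 */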
3268 xfs_bmap_btalloc_nullfb(
3269 struct xfs_bmalloca *ap,
3270 struct xfs_alloc_arg *args,
3273 struct xfs_mount *mp = ap->ip->i_mount;
3274 xfs_agnumber_t ag, startag;
3278 args->type = XFS_ALLOCTYPE_START_BNO;
3279 args->total = ap->total;
3281 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3282 if (startag == NULLAGNUMBER)
3285 while (*blen < args->maxlen) {
3286 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3291 if (++ag == mp->m_sb.sb_agcount)
3297 xfs_bmap_select_minlen(ap, args, blen, notinit);
3302 xfs_bmap_btalloc_filestreams(
3303 struct xfs_bmalloca *ap,
3304 struct xfs_alloc_arg *args,
3307 struct xfs_mount *mp = ap->ip->i_mount;
3312 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3313 args->total = ap->total;
3315 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3316 if (ag == NULLAGNUMBER)
3319 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3323 if (*blen < args->maxlen) {
3324 error = xfs_filestream_new_ag(ap, &ag);
3328 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3335 xfs_bmap_select_minlen(ap, args, blen, notinit);
3338 * Set the failure fallback case to look in the selected AG as the stream may have moved.
3341 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3345 /* Update all inode and quota accounting for the allocation we just did. */
3347 xfs_bmap_btalloc_accounting(
3348 struct xfs_bmalloca *ap,
3349 struct xfs_alloc_arg *args)
3351 if (ap->flags & XFS_BMAPI_COWFORK) {
3353 * COW fork blocks are in-core only and thus are treated as
3354 * in-core quota reservation (like delalloc blocks) even when
3355 * converted to real blocks. The quota reservation is not
3356 * accounted to disk until blocks are remapped to the data
3357 * fork. So if these blocks were previously delalloc, we
3358 * already have quota reservation and there's nothing to do yet.
3365 * Otherwise, we've allocated blocks in a hole. The transaction
3366 * has acquired in-core quota reservation for this extent.
3367 * Rather than account these as real blocks, however, we reduce
3368 * the transaction quota reservation based on the allocation.
3369 * This essentially transfers the transaction quota reservation
3370 * to that of a delalloc extent.
3372 ap->ip->i_delayed_blks += args->len;
3373 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3378 /* data/attr fork only */
3379 ap->ip->i_d.di_nblocks += args->len;
3380 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3382 ap->ip->i_delayed_blks -= args->len;
3383 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3384 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3390 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3392 xfs_mount_t *mp; /* mount point structure */
3393 xfs_alloctype_t atype = 0; /* type for allocation routines */
3394 xfs_extlen_t align = 0; /* minimum allocation alignment */
3395 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3397 xfs_alloc_arg_t args;
3398 xfs_fileoff_t orig_offset;
3399 xfs_extlen_t orig_length;
3401 xfs_extlen_t nextminlen = 0;
3402 int nullfb; /* true if ap->firstblock isn't set */
3409 orig_offset = ap->offset;
3410 orig_length = ap->length;
3412 mp = ap->ip->i_mount;
3414 /* stripe alignment for allocation is determined by mount parameters */
3416 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3417 stripe_align = mp->m_swidth;
3418 else if (mp->m_dalign)
3419 stripe_align = mp->m_dalign;
3421 if (ap->flags & XFS_BMAPI_COWFORK)
3422 align = xfs_get_cowextsz_hint(ap->ip);
3423 else if (xfs_alloc_is_userdata(ap->datatype))
3424 align = xfs_get_extsz_hint(ap->ip);
3426 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3427 align, 0, ap->eof, 0, ap->conv,
3428 &ap->offset, &ap->length);
3434 nullfb = *ap->firstblock == NULLFSBLOCK;
3435 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3437 if (xfs_alloc_is_userdata(ap->datatype) &&
3438 xfs_inode_is_filestream(ap->ip)) {
3439 ag = xfs_filestream_lookup_ag(ap->ip);
3440 ag = (ag != NULLAGNUMBER) ? ag : 0;
3441 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3443 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3446 ap->blkno = *ap->firstblock;
3448 xfs_bmap_adjacent(ap);
3451 * If allowed, use ap->blkno; otherwise must use firstblock since
3452 * it's in the right allocation group.
3454 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3457 ap->blkno = *ap->firstblock;
3459 * Normal allocation, done through xfs_alloc_vextent.
3461 tryagain = isaligned = 0;
3462 memset(&args, 0, sizeof(args));
3465 args.fsbno = ap->blkno;
3466 xfs_rmap_skip_owner_update(&args.oinfo);
3468 /* Trim the allocation back to the maximum an AG can fit. */
3469 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3470 args.firstblock = *ap->firstblock;
3474 * Search for an allocation group with a single extent large
3475 * enough for the request. If one isn't found, then adjust
3476 * the minimum allocation size to the largest space found.
3478 if (xfs_alloc_is_userdata(ap->datatype) &&
3479 xfs_inode_is_filestream(ap->ip))
3480 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3482 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3485 } else if (ap->dfops->dop_low) {
3486 if (xfs_inode_is_filestream(ap->ip))
3487 args.type = XFS_ALLOCTYPE_FIRST_AG;
3489 args.type = XFS_ALLOCTYPE_START_BNO;
3490 args.total = args.minlen = ap->minlen;
3492 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3493 args.total = ap->total;
3494 args.minlen = ap->minlen;
3496 /* apply extent size hints if obtained earlier */
3499 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3500 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3501 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3505 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3506 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3507 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3510 * If we are not low on available data blocks, and the
3511 * underlying logical volume manager is a stripe, and
3512 * the file offset is zero then try to allocate data
3513 * blocks on stripe unit boundary.
3514 * NOTE: ap->aeof is only set if the allocation length
3515 * is >= the stripe unit and the allocation offset is
3516 * at the end of file.
3518 if (!ap->dfops->dop_low && ap->aeof) {
3520 args.alignment = stripe_align;
3524 * Adjust for alignment
3526 if (blen > args.alignment && blen <= args.maxlen)
3527 args.minlen = blen - args.alignment;
3528 args.minalignslop = 0;
3531 * First try an exact bno allocation.
3532 * If it fails then do a near or start bno
3533 * allocation with alignment turned on.
3537 args.type = XFS_ALLOCTYPE_THIS_BNO;
3540 * Compute the minlen+alignment for the
3541 * next case. Set slop so that the value
3542 * of minlen+alignment+slop doesn't go up
3543 * between the calls.
3545 if (blen > stripe_align && blen <= args.maxlen)
3546 nextminlen = blen - stripe_align;
3547 else
3548 nextminlen = args.minlen;
3549 if (nextminlen + stripe_align > args.minlen + 1)
3550 args.minalignslop = nextminlen + stripe_align - args.minlen - 1;
3553 else
3554 args.minalignslop = 0;
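/*
 * Slop example (illustrative): if args.minlen is 1 and nextminlen works
 * out to 8 with a stripe_align of 4, the exact-bno attempt reserves
 * minalignslop = 8 + 4 - 1 - 1 = 10 extra blocks, so the later aligned
 * retry (minlen 8, alignment 4) never needs more space than this first
 * attempt already accounted for.
 */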
3558 args.minalignslop = 0;
3560 args.minleft = ap->minleft;
3561 args.wasdel = ap->wasdel;
3562 args.resv = XFS_AG_RESV_NONE;
3563 args.datatype = ap->datatype;
3564 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3567 error = xfs_alloc_vextent(&args);
3571 if (tryagain && args.fsbno == NULLFSBLOCK) {
3573 * Exact allocation failed. Now try with alignment turned on.
3577 args.fsbno = ap->blkno;
3578 args.alignment = stripe_align;
3579 args.minlen = nextminlen;
3580 args.minalignslop = 0;
3582 if ((error = xfs_alloc_vextent(&args)))
3585 if (isaligned && args.fsbno == NULLFSBLOCK) {
3587 * allocation failed, so turn off alignment and try again.
3591 args.fsbno = ap->blkno;
3593 if ((error = xfs_alloc_vextent(&args)))
3596 if (args.fsbno == NULLFSBLOCK && nullfb &&
3597 args.minlen > ap->minlen) {
3598 args.minlen = ap->minlen;
3599 args.type = XFS_ALLOCTYPE_START_BNO;
3600 args.fsbno = ap->blkno;
3601 if ((error = xfs_alloc_vextent(&args)))
3604 if (args.fsbno == NULLFSBLOCK && nullfb) {
3606 args.type = XFS_ALLOCTYPE_FIRST_AG;
3607 args.total = ap->minlen;
3608 if ((error = xfs_alloc_vextent(&args)))
3610 ap->dfops->dop_low = true;
3612 if (args.fsbno != NULLFSBLOCK) {
3614 * check the allocation happened at the same or higher AG than
3615 * the first block that was allocated.
3617 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3618 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3619 XFS_FSB_TO_AGNO(mp, args.fsbno));
3621 ap->blkno = args.fsbno;
3622 if (*ap->firstblock == NULLFSBLOCK)
3623 *ap->firstblock = args.fsbno;
3624 ASSERT(nullfb || fb_agno <= args.agno);
3625 ap->length = args.len;
3627 * If the extent size hint is active, we tried to round the
3628 * caller's allocation request offset down to extsz and the
3629 * length up to another extsz boundary. If we found a free
3630 * extent we mapped it in starting at this new offset. If the
3631 * newly mapped space isn't long enough to cover any of the
3632 * range of offsets that was originally requested, move the
3633 * mapping up so that we can fill as much of the caller's
3634 * original request as possible. Free space is apparently
3635 * very fragmented so we're unlikely to be able to satisfy the hints anyway.
3638 if (ap->length <= orig_length)
3639 ap->offset = orig_offset;
3640 else if (ap->offset + ap->length < orig_offset + orig_length)
3641 ap->offset = orig_offset + orig_length - ap->length;
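/*
 * Remap example (hypothetical numbers): a request for offset 100,
 * length 10 may have been aligned down to offset 96, length 16. If only
 * 8 blocks could be allocated, the mapping is moved up to start at the
 * caller's offset 100 (covering [100,108)) rather than being left at 96
 * where it could miss the range the caller actually asked for.
 */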
3642 xfs_bmap_btalloc_accounting(ap, &args);
3644 ap->blkno = NULLFSBLOCK;
3651 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3652 * It figures out where to ask the underlying allocator to put the new extent.
3656 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3658 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3659 xfs_alloc_is_userdata(ap->datatype))
3660 return xfs_bmap_rtalloc(ap);
3661 return xfs_bmap_btalloc(ap);
3664 /* Trim extent to fit a logical block range. */
3667 struct xfs_bmbt_irec *irec,
3671 xfs_fileoff_t distance;
3672 xfs_fileoff_t end = bno + len;
3674 if (irec->br_startoff + irec->br_blockcount <= bno ||
3675 irec->br_startoff >= end) {
3676 irec->br_blockcount = 0;
3680 if (irec->br_startoff < bno) {
3681 distance = bno - irec->br_startoff;
3682 if (isnullstartblock(irec->br_startblock))
3683 irec->br_startblock = DELAYSTARTBLOCK;
3684 if (irec->br_startblock != DELAYSTARTBLOCK &&
3685 irec->br_startblock != HOLESTARTBLOCK)
3686 irec->br_startblock += distance;
3687 irec->br_startoff += distance;
3688 irec->br_blockcount -= distance;
3691 if (end < irec->br_startoff + irec->br_blockcount) {
3692 distance = irec->br_startoff + irec->br_blockcount - end;
3693 irec->br_blockcount -= distance;
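/*
 * Trim example (illustrative): trimming an extent covering file blocks
 * [10,30) to bno = 15, len = 10 first advances the start by 5 blocks
 * (both the file offset and, for real extents, the disk block) and then
 * clips 5 blocks off the end, leaving [15,25).
 */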
3697 /* trim extent to within eof */
3699 xfs_trim_extent_eof(
3700 struct xfs_bmbt_irec *irec,
3701 struct xfs_inode *ip)
3704 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3705 i_size_read(VFS_I(ip))));
3709 * Trim the returned map to the required bounds
3713 struct xfs_bmbt_irec *mval,
3714 struct xfs_bmbt_irec *got,
3722 if ((flags & XFS_BMAPI_ENTIRE) ||
3723 got->br_startoff + got->br_blockcount <= obno) {
3725 if (isnullstartblock(got->br_startblock))
3726 mval->br_startblock = DELAYSTARTBLOCK;
3732 ASSERT((*bno >= obno) || (n == 0));
3734 mval->br_startoff = *bno;
3735 if (isnullstartblock(got->br_startblock))
3736 mval->br_startblock = DELAYSTARTBLOCK;
3738 mval->br_startblock = got->br_startblock +
3739 (*bno - got->br_startoff);
3741 * Return the minimum of what we got and what we asked for for
3742 * the length. We can use the len variable here because it is
3743 * modified below and we could have been there before coming
3744 * here if the first part of the allocation didn't overlap what was asked for.
3747 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3748 got->br_blockcount - (*bno - got->br_startoff));
3749 mval->br_state = got->br_state;
3750 ASSERT(mval->br_blockcount <= len);
3755 * Update and validate the extent map to return
3758 xfs_bmapi_update_map(
3759 struct xfs_bmbt_irec **map,
3767 xfs_bmbt_irec_t *mval = *map;
3769 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3770 ((mval->br_startoff + mval->br_blockcount) <= end));
3771 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3772 (mval->br_startoff < obno));
3774 *bno = mval->br_startoff + mval->br_blockcount;
3776 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3777 /* update previous map with new information */
3778 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3779 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3780 ASSERT(mval->br_state == mval[-1].br_state);
3781 mval[-1].br_blockcount = mval->br_blockcount;
3782 mval[-1].br_state = mval->br_state;
3783 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3784 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3785 mval[-1].br_startblock != HOLESTARTBLOCK &&
3786 mval->br_startblock == mval[-1].br_startblock +
3787 mval[-1].br_blockcount &&
3788 ((flags & XFS_BMAPI_IGSTATE) ||
3789 mval[-1].br_state == mval->br_state)) {
3790 ASSERT(mval->br_startoff ==
3791 mval[-1].br_startoff + mval[-1].br_blockcount);
3792 mval[-1].br_blockcount += mval->br_blockcount;
3793 } else if (*n > 0 &&
3794 mval->br_startblock == DELAYSTARTBLOCK &&
3795 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3796 mval->br_startoff ==
3797 mval[-1].br_startoff + mval[-1].br_blockcount) {
3798 mval[-1].br_blockcount += mval->br_blockcount;
3799 mval[-1].br_state = mval->br_state;
3800 } else if (!((*n == 0) &&
3801 ((mval->br_startoff + mval->br_blockcount) <= obno))) {
3810 * Map file blocks to filesystem blocks without allocation.
3814 struct xfs_inode *ip,
3817 struct xfs_bmbt_irec *mval,
3821 struct xfs_mount *mp = ip->i_mount;
3822 struct xfs_ifork *ifp;
3823 struct xfs_bmbt_irec got;
3826 struct xfs_iext_cursor icur;
3830 int whichfork = xfs_bmapi_whichfork(flags);
3833 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3834 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
3835 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3837 if (unlikely(XFS_TEST_ERROR(
3838 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3839 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3840 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3841 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3842 return -EFSCORRUPTED;
3845 if (XFS_FORCED_SHUTDOWN(mp))
3846 return -EIO;
3848 XFS_STATS_INC(mp, xs_blk_mapr);
3850 ifp = XFS_IFORK_PTR(ip, whichfork);
3852 /* No CoW fork? Return a hole. */
3853 if (whichfork == XFS_COW_FORK && !ifp) {
3854 mval->br_startoff = bno;
3855 mval->br_startblock = HOLESTARTBLOCK;
3856 mval->br_blockcount = len;
3857 mval->br_state = XFS_EXT_NORM;
3862 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3863 error = xfs_iread_extents(NULL, ip, whichfork);
3868 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3869 eof = true;
3873 while (bno < end && n < *nmap) {
3874 /* Reading past eof, act as though there's a hole up to end. */
3875 if (eof)
3876 got.br_startoff = end;
3877 if (got.br_startoff > bno) {
3878 /* Reading in a hole. */
3879 mval->br_startoff = bno;
3880 mval->br_startblock = HOLESTARTBLOCK;
3881 mval->br_blockcount =
3882 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3883 mval->br_state = XFS_EXT_NORM;
3884 bno += mval->br_blockcount;
3885 len -= mval->br_blockcount;
3891 /* set up the extent map to return. */
3892 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3893 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3895 /* If we're done, stop now. */
3896 if (bno >= end || n >= *nmap)
3899 /* Else go on to the next record. */
3900 if (!xfs_iext_next_extent(ifp, &icur, &got))
3908 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3909 * global pool and the extent inserted into the inode in-core extent tree.
3911 * On entry, got refers to the first extent beyond the offset of the extent to
3912 * allocate or eof is specified if no such extent exists. On return, got refers
3913 * to the extent record that was inserted to the inode fork.
3915 * Note that the allocated extent may have been merged with contiguous extents
3916 * during insertion into the inode fork. Thus, got does not reflect the current
3917 * state of the inode fork on return. If necessary, the caller can use @icur to
3918 * look up the updated record in the inode fork.
3921 xfs_bmapi_reserve_delalloc(
3922 struct xfs_inode *ip,
3926 xfs_filblks_t prealloc,
3927 struct xfs_bmbt_irec *got,
3928 struct xfs_iext_cursor *icur,
3931 struct xfs_mount *mp = ip->i_mount;
3932 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3934 xfs_extlen_t indlen;
3936 xfs_fileoff_t aoff = off;
3939 * Cap the alloc length. Keep track of prealloc so we know whether to
3940 * tag the inode before we return.
3942 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3943 if (!eof)
3944 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3945 if (prealloc && alen >= len)
3946 prealloc = alen - len;
3948 /* Figure out the extent size, adjust alen */
3949 if (whichfork == XFS_COW_FORK) {
3950 struct xfs_bmbt_irec prev;
3951 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3953 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3954 prev.br_startoff = NULLFILEOFF;
3956 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3957 1, 0, &aoff, &alen);
3962 * Make a transaction-less quota reservation for delayed allocation
3963 * blocks. This number gets adjusted later. We return if we haven't
3964 * allocated blocks already inside this loop.
3966 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3967 XFS_QMOPT_RES_REGBLKS);
3972 * Split changing sb for alen and indlen since they could be coming
3973 * from different places.
3975 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
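/*
 * Editorial note (hypothetical numbers): the data blocks (alen) and the
 * worst-case bmap btree blocks (indlen) are reserved from fdblocks
 * separately below; e.g. a 1024-block delalloc reservation also sets
 * aside the few indirect blocks xfs_bmap_worst_indlen() predicts for a
 * future bmap btree.
 */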
3978 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3980 goto out_unreserve_quota;
3982 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3984 goto out_unreserve_blocks;
3987 ip->i_delayed_blks += alen;
3989 got->br_startoff = aoff;
3990 got->br_startblock = nullstartblock(indlen);
3991 got->br_blockcount = alen;
3992 got->br_state = XFS_EXT_NORM;
3994 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3997 * Tag the inode if blocks were preallocated. Note that COW fork
3998 * preallocation can occur at the start or end of the extent, even when
3999 * prealloc == 0, so we must also check the aligned offset and length.
4001 if (whichfork == XFS_DATA_FORK && prealloc)
4002 xfs_inode_set_eofblocks_tag(ip);
4003 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4004 xfs_inode_set_cowblocks_tag(ip);
4008 out_unreserve_blocks:
4009 xfs_mod_fdblocks(mp, alen, false);
4010 out_unreserve_quota:
4011 if (XFS_IS_QUOTA_ON(mp))
4012 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
4013 XFS_QMOPT_RES_REGBLKS);
4019 struct xfs_bmalloca *bma)
4021 struct xfs_mount *mp = bma->ip->i_mount;
4022 int whichfork = xfs_bmapi_whichfork(bma->flags);
4023 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4024 int tmp_logflags = 0;
4027 ASSERT(bma->length > 0);
4030 * For the wasdelay case, we could also just allocate the stuff asked
4031 * for in this bmap call but that wouldn't be as good.
4034 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4035 bma->offset = bma->got.br_startoff;
4036 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4038 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4040 bma->length = XFS_FILBLKS_MIN(bma->length,
4041 bma->got.br_startoff - bma->offset);
4045 * Set the data type being allocated. For the data fork, the first data
4046 * in the file is treated differently to all other allocations. For the
4047 * attribute fork, we only need to ensure the allocated range is not on
4050 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4051 bma->datatype = XFS_ALLOC_NOBUSY;
4052 if (whichfork == XFS_DATA_FORK) {
4053 if (bma->offset == 0)
4054 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4056 bma->datatype |= XFS_ALLOC_USERDATA;
4058 if (bma->flags & XFS_BMAPI_ZERO)
4059 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4062 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4065 * Only want to do the alignment at the eof if it is userdata and
4066 * allocation length is larger than a stripe unit.
4068 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4069 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4070 error = xfs_bmap_isaeof(bma, whichfork);
4075 error = xfs_bmap_alloc(bma);
4080 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4081 if (bma->blkno == NULLFSBLOCK)
4083 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4084 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4085 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4086 bma->cur->bc_private.b.dfops = bma->dfops;
4089 * Bump the number of extents we've allocated in this call.
4095 bma->cur->bc_private.b.flags =
4096 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4098 bma->got.br_startoff = bma->offset;
4099 bma->got.br_startblock = bma->blkno;
4100 bma->got.br_blockcount = bma->length;
4101 bma->got.br_state = XFS_EXT_NORM;
4104 * In the data fork, a wasdelay extent has been initialized, so
4105 * shouldn't be flagged as unwritten.
4107 * For the cow fork, however, we convert delalloc reservations
4108 * (extents allocated for speculative preallocation) to
4109 * allocated unwritten extents, and only convert the unwritten
4110 * extents to real extents when we're about to write the data.
4112 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4113 (bma->flags & XFS_BMAPI_PREALLOC) &&
4114 xfs_sb_version_hasextflgbit(&mp->m_sb))
4115 bma->got.br_state = XFS_EXT_UNWRITTEN;
4118 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4120 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4121 whichfork, &bma->icur, &bma->cur, &bma->got,
4122 bma->firstblock, bma->dfops, &bma->logflags);
4124 bma->logflags |= tmp_logflags;
4129 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4130 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4131 * the neighbouring ones.
4133 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4135 ASSERT(bma->got.br_startoff <= bma->offset);
4136 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4137 bma->offset + bma->length);
4138 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4139 bma->got.br_state == XFS_EXT_UNWRITTEN);
4144 xfs_bmapi_convert_unwritten(
4145 struct xfs_bmalloca *bma,
4146 struct xfs_bmbt_irec *mval,
4150 int whichfork = xfs_bmapi_whichfork(flags);
4151 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4152 int tmp_logflags = 0;
4155 /* check if we need to do unwritten->real conversion */
4156 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4157 (flags & XFS_BMAPI_PREALLOC))
4160 /* check if we need to do real->unwritten conversion */
4161 if (mval->br_state == XFS_EXT_NORM &&
4162 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4163 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4167 * Modify (by adding) the state flag, if writing.
4169 ASSERT(mval->br_blockcount <= len);
4170 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4171 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4172 bma->ip, whichfork);
4173 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4174 bma->cur->bc_private.b.dfops = bma->dfops;
4176 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4177 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4180 * Before insertion into the bmbt, zero the range being converted if required.
4183 if (flags & XFS_BMAPI_ZERO) {
4184 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4185 mval->br_blockcount);
4190 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4191 &bma->icur, &bma->cur, mval, bma->firstblock,
4192 bma->dfops, &tmp_logflags);
4194 * Log the inode core unconditionally in the unwritten extent conversion
4195 * path because the conversion might not have done so (e.g., if the
4196 * extent count hasn't changed). We need to make sure the inode is dirty
4197 * in the transaction for the sake of fsync(), even if nothing has
4198 * changed, because fsync() will not force the log for this transaction
4199 * unless it sees the inode pinned.
4201 * Note: If we're only converting cow fork extents, there aren't
4202 * any on-disk updates to make, so we don't need to log anything.
4204 if (whichfork != XFS_COW_FORK)
4205 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4210 * Update our extent pointer, given that
4211 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4212 * of the neighbouring ones.
4214 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4217 * We may have combined previously unwritten space with written space,
4218 * so generate another request.
4220 if (mval->br_blockcount < len)
4221 return -EAGAIN;
4226 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4227 * extent state if necessary. Detailed behaviour is controlled by the flags
4228 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4231 * The returned value in "firstblock" from the first call in a transaction
4232 * must be remembered and presented to subsequent calls in "firstblock".
4233 * An upper bound for the number of blocks to be allocated is supplied to
4234 * the first call in "total"; if no allocation group has that many free
4235 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4239 struct xfs_trans *tp, /* transaction pointer */
4240 struct xfs_inode *ip, /* incore inode */
4241 xfs_fileoff_t bno, /* starting file offs. mapped */
4242 xfs_filblks_t len, /* length to map in file */
4243 int flags, /* XFS_BMAPI_... */
4244 xfs_fsblock_t *firstblock, /* first allocated block
4245 controls a.g. for allocs */
4246 xfs_extlen_t total, /* total blocks needed */
4247 struct xfs_bmbt_irec *mval, /* output: map values */
4248 int *nmap, /* i/o: mval size/count */
4249 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4251 struct xfs_mount *mp = ip->i_mount;
4252 struct xfs_ifork *ifp;
4253 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4254 xfs_fileoff_t end; /* end of mapped file region */
4255 bool eof = false; /* after the end of extents */
4256 int error; /* error return */
4257 int n; /* current extent index */
4258 xfs_fileoff_t obno; /* old block number (offset) */
4259 int whichfork; /* data or attr fork */
4262 xfs_fileoff_t orig_bno; /* original block number value */
4263 int orig_flags; /* original flags arg value */
4264 xfs_filblks_t orig_len; /* original value of len arg */
4265 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4266 int orig_nmap; /* original value of *nmap */
4274 whichfork = xfs_bmapi_whichfork(flags);
4277 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4278 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4279 ASSERT(tp != NULL ||
4280 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4281 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4283 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4284 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4285 ASSERT(!(flags & XFS_BMAPI_REMAP));
4287 /* zeroing is currently only for data extents, not metadata */
4288 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4289 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4291 * we can allocate unwritten extents or pre-zero allocated blocks,
4292 * but it makes no sense to do both at once. This would result in
4293 * zeroing the unwritten extent twice, while still leaving it an
4294 * unwritten extent.
4296 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4297 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4299 if (unlikely(XFS_TEST_ERROR(
4300 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4301 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4302 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4303 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4304 return -EFSCORRUPTED;
4307 if (XFS_FORCED_SHUTDOWN(mp))
4308 return -EIO;
4310 ifp = XFS_IFORK_PTR(ip, whichfork);
4312 XFS_STATS_INC(mp, xs_blk_mapw);
4314 if (*firstblock == NULLFSBLOCK) {
4315 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4316 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4323 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4324 error = xfs_iread_extents(tp, ip, whichfork);
4333 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4334 eof = true;
4335 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4336 bma.prev.br_startoff = NULLFILEOFF;
4342 bma.firstblock = firstblock;
4344 while (bno < end && n < *nmap) {
4345 bool need_alloc = false, wasdelay = false;
4347 /* in hole or beyond EOF? */
4348 if (eof || bma.got.br_startoff > bno) {
4350 * CoW fork conversions should /never/ hit EOF or
4351 * holes. There should always be something for us
4354 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4355 (flags & XFS_BMAPI_COWFORK)));
4357 if (flags & XFS_BMAPI_DELALLOC) {
4359 * For the COW fork we can reasonably get a
4360 * request for converting an extent that races
4361 * with other threads already having converted
4362 * part of it, since converting COW to
4363 * regular blocks is not protected using the
4366 ASSERT(flags & XFS_BMAPI_COWFORK);
4367 if (!(flags & XFS_BMAPI_COWFORK)) {
4372 if (eof || bno >= end)
4377 } else if (isnullstartblock(bma.got.br_startblock)) {
4382 * First, deal with the hole before the allocated space
4383 * that we found, if any.
4385 if ((need_alloc || wasdelay) &&
4386 !(flags & XFS_BMAPI_CONVERT_ONLY)) {
4388 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4389 bma.wasdel = wasdelay;
4394 * There's a 32/64 bit type mismatch between the
4395 * allocation length request (which can be 64 bits in
4396 * length) and the bma length request, which is
4397 * xfs_extlen_t and therefore 32 bits. Hence we have to
4398 * check for 32-bit overflows and handle them here.
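 *
 * For example (hypothetical request): a caller asking for more than
 * MAXEXTLEN blocks in one go cannot have that length stored in the
 * 32-bit bma.length, so it is clamped to MAXEXTLEN here and the
 * remainder is left for later iterations of the surrounding loop (or
 * for the caller, once nmap is exhausted).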
4400 if (len > (xfs_filblks_t)MAXEXTLEN)
4401 bma.length = MAXEXTLEN;
4406 ASSERT(bma.length > 0);
4407 error = xfs_bmapi_allocate(&bma);
4410 if (bma.blkno == NULLFSBLOCK)
4414 * If this is a CoW allocation, record the data in
4415 * the refcount btree for orphan recovery.
4417 if (whichfork == XFS_COW_FORK) {
4418 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4419 bma.blkno, bma.length);
4425 /* Deal with the allocated space we found. */
4426 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4429 /* Execute unwritten extent conversion if necessary */
4430 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4431 if (error == -EAGAIN)
4436 /* update the extent map to return */
4437 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4440 * If we're done, stop now. Stop when we've allocated
4441 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4442 * the transaction may get too big.
4444 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4447 /* Else go on to the next record. */
4449 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
4455 * Transform from btree to extents, give it cur.
4457 if (xfs_bmap_wants_extents(ip, whichfork)) {
4458 int tmp_logflags = 0;
4461 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4462 &tmp_logflags, whichfork);
4463 bma.logflags |= tmp_logflags;
4468 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4469 XFS_IFORK_NEXTENTS(ip, whichfork) >
4470 XFS_IFORK_MAXEXT(ip, whichfork));
4474 * Log everything. Do this after conversion, there's no point in
4475 * logging the extent records if we've converted to btree format.
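 *
 * For instance, if the fork was just converted to btree format, a
 * previously set extent-format logging flag (xfs_ilog_fext()) no longer
 * matches the on-disk fork format and is cleared below.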
4477 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4478 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4479 bma.logflags &= ~xfs_ilog_fext(whichfork);
4480 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4481 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4482 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4484 * Log whatever the flags say, even on error. Otherwise we might miss
4485 * detecting a case where the data is changed, there's an error,
4486 * and it's not logged so we don't shut down when we should.
4489 xfs_trans_log_inode(tp, ip, bma.logflags);
4493 ASSERT(*firstblock == NULLFSBLOCK ||
4494 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4496 bma.cur->bc_private.b.firstblock));
4497 *firstblock = bma.cur->bc_private.b.firstblock;
4499 xfs_btree_del_cursor(bma.cur,
4500 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4503 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4510 struct xfs_trans *tp,
4511 struct xfs_inode *ip,
4514 xfs_fsblock_t startblock,
4515 struct xfs_defer_ops *dfops)
4517 struct xfs_mount *mp = ip->i_mount;
4518 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4519 struct xfs_btree_cur *cur = NULL;
4520 xfs_fsblock_t firstblock = NULLFSBLOCK;
4521 struct xfs_bmbt_irec got;
4522 struct xfs_iext_cursor icur;
4523 int logflags = 0, error;
4526 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4527 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4529 if (unlikely(XFS_TEST_ERROR(
4530 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4531 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4532 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4533 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4534 return -EFSCORRUPTED;
4537 if (XFS_FORCED_SHUTDOWN(mp))
4540 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4541 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4546 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4547 /* make sure we only reflink into a hole. */
4548 ASSERT(got.br_startoff > bno);
4549 ASSERT(got.br_startoff - bno >= len);
4552 ip->i_d.di_nblocks += len;
4553 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4555 if (ifp->if_flags & XFS_IFBROOT) {
4556 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
4557 cur->bc_private.b.firstblock = firstblock;
4558 cur->bc_private.b.dfops = dfops;
4559 cur->bc_private.b.flags = 0;
4562 got.br_startoff = bno;
4563 got.br_startblock = startblock;
4564 got.br_blockcount = len;
4565 got.br_state = XFS_EXT_NORM;
4567 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &icur,
4568 &cur, &got, &firstblock, dfops, &logflags);
4572 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
4573 int tmp_logflags = 0;
4575 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4576 &tmp_logflags, XFS_DATA_FORK);
4577 logflags |= tmp_logflags;
4581 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4582 logflags &= ~XFS_ILOG_DEXT;
4583 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4584 logflags &= ~XFS_ILOG_DBROOT;
4587 xfs_trans_log_inode(tp, ip, logflags);
4589 xfs_btree_del_cursor(cur,
4590 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4596 * When a delalloc extent is split (e.g., due to a hole punch), the original
4597 * indlen reservation must be shared across the two new extents that are left
4600 * Given the original reservation and the worst case indlen for the two new
4601 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4602 * reservation fairly across the two new extents. If necessary, steal available
4603 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4604 * ores == 1). The number of stolen blocks is returned. The availability and
4605 * subsequent accounting of stolen blocks is the responsibility of the caller.
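 *
 * Worked example (illustrative numbers only): with ores = 5, *indlen1 = 4,
 * *indlen2 = 4 and avail = 1, one block is stolen (the shortfall of
 * nres - ores = 3 is capped at avail), bringing the reservation up to 6.
 * Both requests are then scaled down by 6/8ths (75%), so each extent ends
 * up with an indlen of 3 and the function returns 1 stolen block.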
4607 static xfs_filblks_t
4608 xfs_bmap_split_indlen(
4609 xfs_filblks_t ores, /* original res. */
4610 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4611 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4612 xfs_filblks_t avail) /* stealable blocks */
4614 xfs_filblks_t len1 = *indlen1;
4615 xfs_filblks_t len2 = *indlen2;
4616 xfs_filblks_t nres = len1 + len2; /* new total res. */
4617 xfs_filblks_t stolen = 0;
4618 xfs_filblks_t resfactor;
4621 * Steal as many blocks as we can to try and satisfy the worst case
4622 * indlen for both new extents.
4624 if (ores < nres && avail)
4625 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4628 /* nothing else to do if we've satisfied the new reservation */
4633 * We can't meet the total required reservation for the two extents.
4634 * Calculate the percent of the overall shortage between both extents
4635 * and apply this percentage to each of the requested indlen values.
4636 * This distributes the shortage fairly and reduces the chances that one
4637 * of the two extents is left with nothing when extents are repeatedly
4640 resfactor = (ores * 100);
4641 do_div(resfactor, nres);
4646 ASSERT(len1 + len2 <= ores);
4647 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4650 * Hand out the remainder to each extent. If one of the two reservations
4651 * is zero, we want to make sure that one gets a block first. The loop
4652 * below starts with len1, so hand len2 a block right off the bat if it
4655 ores -= (len1 + len2);
4656 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4657 if (ores && !len2 && *indlen2) {
4662 if (len1 < *indlen1) {
4668 if (len2 < *indlen2) {
4681 xfs_bmap_del_extent_delay(
4682 struct xfs_inode *ip,
4684 struct xfs_iext_cursor *icur,
4685 struct xfs_bmbt_irec *got,
4686 struct xfs_bmbt_irec *del)
4688 struct xfs_mount *mp = ip->i_mount;
4689 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4690 struct xfs_bmbt_irec new;
4691 int64_t da_old, da_new, da_diff = 0;
4692 xfs_fileoff_t del_endoff, got_endoff;
4693 xfs_filblks_t got_indlen, new_indlen, stolen;
4694 int state = xfs_bmap_fork_to_state(whichfork);
4698 XFS_STATS_INC(mp, xs_del_exlist);
4700 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4701 del_endoff = del->br_startoff + del->br_blockcount;
4702 got_endoff = got->br_startoff + got->br_blockcount;
4703 da_old = startblockval(got->br_startblock);
4706 ASSERT(del->br_blockcount > 0);
4707 ASSERT(got->br_startoff <= del->br_startoff);
4708 ASSERT(got_endoff >= del_endoff);
4711 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4713 do_div(rtexts, mp->m_sb.sb_rextsize);
4714 xfs_mod_frextents(mp, rtexts);
4718 * Update the inode delalloc counter now and wait to update the
4719 * sb counters as we might have to borrow some blocks for the
4720 * indirect block accounting.
4722 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4723 -((long)del->br_blockcount), 0,
4724 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4727 ip->i_delayed_blks -= del->br_blockcount;
4729 if (got->br_startoff == del->br_startoff)
4730 state |= BMAP_LEFT_FILLING;
4731 if (got_endoff == del_endoff)
4732 state |= BMAP_RIGHT_FILLING;
4734 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4735 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4737 * Matches the whole extent. Delete the entry.
4739 xfs_iext_remove(ip, icur, state);
4740 xfs_iext_prev(ifp, icur);
4742 case BMAP_LEFT_FILLING:
4744 * Deleting the first part of the extent.
4746 got->br_startoff = del_endoff;
4747 got->br_blockcount -= del->br_blockcount;
4748 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4749 got->br_blockcount), da_old);
4750 got->br_startblock = nullstartblock((int)da_new);
4751 xfs_iext_update_extent(ip, state, icur, got);
4753 case BMAP_RIGHT_FILLING:
4755 * Deleting the last part of the extent.
4757 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4758 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4759 got->br_blockcount), da_old);
4760 got->br_startblock = nullstartblock((int)da_new);
4761 xfs_iext_update_extent(ip, state, icur, got);
4765 * Deleting the middle of the extent.
4767 * Distribute the original indlen reservation across the two new
4768 * extents. Steal blocks from the deleted extent if necessary.
4769 * Stealing blocks simply fudges the fdblocks accounting below.
4770 * Warn if either of the new indlen reservations is zero as this
4771 * can lead to delalloc problems.
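 *
 * Illustrative example (made-up numbers): punching [40, 60) out of a
 * delalloc extent covering [0, 100) with da_old = 8 leaves [0, 40) and
 * [60, 100). If their combined worst-case indlen comes to 10, two blocks
 * are stolen from the 20 deleted blocks and the fdblocks adjustment below
 * shrinks accordingly.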
4773 got->br_blockcount = del->br_startoff - got->br_startoff;
4774 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4776 new.br_blockcount = got_endoff - del_endoff;
4777 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4779 WARN_ON_ONCE(!got_indlen || !new_indlen);
4780 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4781 del->br_blockcount);
4783 got->br_startblock = nullstartblock((int)got_indlen);
4785 new.br_startoff = del_endoff;
4786 new.br_state = got->br_state;
4787 new.br_startblock = nullstartblock((int)new_indlen);
4789 xfs_iext_update_extent(ip, state, icur, got);
4790 xfs_iext_next(ifp, icur);
4791 xfs_iext_insert(ip, icur, &new, state);
4793 da_new = got_indlen + new_indlen - stolen;
4794 del->br_blockcount -= stolen;
4798 ASSERT(da_old >= da_new);
4799 da_diff = da_old - da_new;
4801 da_diff += del->br_blockcount;
4803 xfs_mod_fdblocks(mp, da_diff, false);
4808 xfs_bmap_del_extent_cow(
4809 struct xfs_inode *ip,
4810 struct xfs_iext_cursor *icur,
4811 struct xfs_bmbt_irec *got,
4812 struct xfs_bmbt_irec *del)
4814 struct xfs_mount *mp = ip->i_mount;
4815 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4816 struct xfs_bmbt_irec new;
4817 xfs_fileoff_t del_endoff, got_endoff;
4818 int state = BMAP_COWFORK;
4820 XFS_STATS_INC(mp, xs_del_exlist);
4822 del_endoff = del->br_startoff + del->br_blockcount;
4823 got_endoff = got->br_startoff + got->br_blockcount;
4825 ASSERT(del->br_blockcount > 0);
4826 ASSERT(got->br_startoff <= del->br_startoff);
4827 ASSERT(got_endoff >= del_endoff);
4828 ASSERT(!isnullstartblock(got->br_startblock));
4830 if (got->br_startoff == del->br_startoff)
4831 state |= BMAP_LEFT_FILLING;
4832 if (got_endoff == del_endoff)
4833 state |= BMAP_RIGHT_FILLING;
4835 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4836 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4838 * Matches the whole extent. Delete the entry.
4840 xfs_iext_remove(ip, icur, state);
4841 xfs_iext_prev(ifp, icur);
4843 case BMAP_LEFT_FILLING:
4845 * Deleting the first part of the extent.
4847 got->br_startoff = del_endoff;
4848 got->br_blockcount -= del->br_blockcount;
4849 got->br_startblock = del->br_startblock + del->br_blockcount;
4850 xfs_iext_update_extent(ip, state, icur, got);
4852 case BMAP_RIGHT_FILLING:
4854 * Deleting the last part of the extent.
4856 got->br_blockcount -= del->br_blockcount;
4857 xfs_iext_update_extent(ip, state, icur, got);
4861 * Deleting the middle of the extent.
4863 got->br_blockcount = del->br_startoff - got->br_startoff;
4865 new.br_startoff = del_endoff;
4866 new.br_blockcount = got_endoff - del_endoff;
4867 new.br_state = got->br_state;
4868 new.br_startblock = del->br_startblock + del->br_blockcount;
4870 xfs_iext_update_extent(ip, state, icur, got);
4871 xfs_iext_next(ifp, icur);
4872 xfs_iext_insert(ip, icur, &new, state);
4875 ip->i_delayed_blks -= del->br_blockcount;
4879 * Called by xfs_bmapi to update file extent records and the btree
4880 * after removing space.
4882 STATIC int /* error */
4883 xfs_bmap_del_extent_real(
4884 xfs_inode_t *ip, /* incore inode pointer */
4885 xfs_trans_t *tp, /* current transaction pointer */
4886 struct xfs_iext_cursor *icur,
4887 struct xfs_defer_ops *dfops, /* list of extents to be freed */
4888 xfs_btree_cur_t *cur, /* if null, not a btree */
4889 xfs_bmbt_irec_t *del, /* data to remove from extents */
4890 int *logflagsp, /* inode logging flags */
4891 int whichfork, /* data or attr fork */
4892 int bflags) /* bmapi flags */
4894 xfs_fsblock_t del_endblock=0; /* first block past del */
4895 xfs_fileoff_t del_endoff; /* first offset past del */
4896 int do_fx; /* free extent at end of routine */
4897 int error; /* error return value */
4898 int flags = 0;/* inode logging flags */
4899 struct xfs_bmbt_irec got; /* current extent entry */
4900 xfs_fileoff_t got_endoff; /* first offset past got */
4901 int i; /* temp state */
4902 xfs_ifork_t *ifp; /* inode fork pointer */
4903 xfs_mount_t *mp; /* mount structure */
4904 xfs_filblks_t nblks; /* quota/sb block count */
4905 xfs_bmbt_irec_t new; /* new record to be inserted */
4907 uint qfield; /* quota field to update */
4908 int state = xfs_bmap_fork_to_state(whichfork);
4909 struct xfs_bmbt_irec old;
4912 XFS_STATS_INC(mp, xs_del_exlist);
4914 ifp = XFS_IFORK_PTR(ip, whichfork);
4915 ASSERT(del->br_blockcount > 0);
4916 xfs_iext_get_extent(ifp, icur, &got);
4917 ASSERT(got.br_startoff <= del->br_startoff);
4918 del_endoff = del->br_startoff + del->br_blockcount;
4919 got_endoff = got.br_startoff + got.br_blockcount;
4920 ASSERT(got_endoff >= del_endoff);
4921 ASSERT(!isnullstartblock(got.br_startblock));
4926 * If it's the case where the directory code is running with no block
4927 * reservation, and the deleted block is in the middle of its extent,
4928 * and the resulting insert of an extent would cause transformation to
4929 * btree format, then reject it. The calling code will then swap blocks
4930 * around instead. We have to do this now, rather than waiting for the
4931 * conversion to btree format, since the transaction will be dirty then.
4933 if (tp->t_blk_res == 0 &&
4934 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4935 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4936 XFS_IFORK_MAXEXT(ip, whichfork) &&
4937 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
4940 flags = XFS_ILOG_CORE;
4941 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4945 ASSERT(do_mod(del->br_blockcount, mp->m_sb.sb_rextsize) == 0);
4946 ASSERT(do_mod(del->br_startblock, mp->m_sb.sb_rextsize) == 0);
4947 bno = del->br_startblock;
4948 len = del->br_blockcount;
4949 do_div(bno, mp->m_sb.sb_rextsize);
4950 do_div(len, mp->m_sb.sb_rextsize);
4951 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4955 nblks = len * mp->m_sb.sb_rextsize;
4956 qfield = XFS_TRANS_DQ_RTBCOUNT;
4959 nblks = del->br_blockcount;
4960 qfield = XFS_TRANS_DQ_BCOUNT;
4963 del_endblock = del->br_startblock + del->br_blockcount;
4965 error = xfs_bmbt_lookup_eq(cur, &got, &i);
4968 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4971 if (got.br_startoff == del->br_startoff)
4972 state |= BMAP_LEFT_FILLING;
4973 if (got_endoff == del_endoff)
4974 state |= BMAP_RIGHT_FILLING;
4976 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4977 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4979 * Matches the whole extent. Delete the entry.
4981 xfs_iext_remove(ip, icur, state);
4982 xfs_iext_prev(ifp, icur);
4983 XFS_IFORK_NEXT_SET(ip, whichfork,
4984 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4985 flags |= XFS_ILOG_CORE;
4987 flags |= xfs_ilog_fext(whichfork);
4990 if ((error = xfs_btree_delete(cur, &i)))
4992 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4994 case BMAP_LEFT_FILLING:
4996 * Deleting the first part of the extent.
4998 got.br_startoff = del_endoff;
4999 got.br_startblock = del_endblock;
5000 got.br_blockcount -= del->br_blockcount;
5001 xfs_iext_update_extent(ip, state, icur, &got);
5003 flags |= xfs_ilog_fext(whichfork);
5006 error = xfs_bmbt_update(cur, &got);
5010 case BMAP_RIGHT_FILLING:
5012 * Deleting the last part of the extent.
5014 got.br_blockcount -= del->br_blockcount;
5015 xfs_iext_update_extent(ip, state, icur, &got);
5017 flags |= xfs_ilog_fext(whichfork);
5020 error = xfs_bmbt_update(cur, &got);
5026 * Deleting the middle of the extent.
5030 got.br_blockcount = del->br_startoff - got.br_startoff;
5031 xfs_iext_update_extent(ip, state, icur, &got);
5033 new.br_startoff = del_endoff;
5034 new.br_blockcount = got_endoff - del_endoff;
5035 new.br_state = got.br_state;
5036 new.br_startblock = del_endblock;
5038 flags |= XFS_ILOG_CORE;
5040 error = xfs_bmbt_update(cur, &got);
5043 error = xfs_btree_increment(cur, 0, &i);
5046 cur->bc_rec.b = new;
5047 error = xfs_btree_insert(cur, &i);
5048 if (error && error != -ENOSPC)
5051 * If we get no-space back from the btree insert, it tried a
5052 * split, and we have a zero block reservation. Fix up
5053 * our state and return the error.
5055 if (error == -ENOSPC) {
5057 * Reset the cursor, don't trust it after any
5060 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5063 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5065 * Update the btree record back
5066 * to the original value.
5068 error = xfs_bmbt_update(cur, &old);
5072 * Reset the extent record back
5073 * to the original value.
5075 xfs_iext_update_extent(ip, state, icur, &old);
5080 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5082 flags |= xfs_ilog_fext(whichfork);
5083 XFS_IFORK_NEXT_SET(ip, whichfork,
5084 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5085 xfs_iext_next(ifp, icur);
5086 xfs_iext_insert(ip, icur, &new, state);
5090 /* remove reverse mapping */
5091 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5096 * If we need to, add to list of extents to delete.
5098 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5099 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5100 error = xfs_refcount_decrease_extent(mp, dfops, del);
5104 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5105 del->br_blockcount, NULL);
5109 * Adjust inode # blocks in the file.
5112 ip->i_d.di_nblocks -= nblks;
5114 * Adjust quota data.
5116 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5117 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5125 * Unmap (remove) blocks from a file.
5126 * If nexts is nonzero then the number of extents to remove is limited to
5127 * that value. If not all extents in the block range can be removed then
5132 xfs_trans_t *tp, /* transaction pointer */
5133 struct xfs_inode *ip, /* incore inode */
5134 xfs_fileoff_t start, /* first file offset deleted */
5135 xfs_filblks_t *rlen, /* i/o: amount remaining */
5136 int flags, /* misc flags */
5137 xfs_extnum_t nexts, /* number of extents max */
5138 xfs_fsblock_t *firstblock, /* first allocated block
5139 controls a.g. for allocs */
5140 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5142 xfs_btree_cur_t *cur; /* bmap btree cursor */
5143 xfs_bmbt_irec_t del; /* extent being deleted */
5144 int error; /* error return value */
5145 xfs_extnum_t extno; /* extent number in list */
5146 xfs_bmbt_irec_t got; /* current extent record */
5147 xfs_ifork_t *ifp; /* inode fork pointer */
5148 int isrt; /* freeing in rt area */
5149 int logflags; /* transaction logging flags */
5150 xfs_extlen_t mod; /* rt extent offset */
5151 xfs_mount_t *mp; /* mount structure */
5152 int tmp_logflags; /* partial logging flags */
5153 int wasdel; /* was a delayed alloc extent */
5154 int whichfork; /* data or attribute fork */
5156 xfs_filblks_t len = *rlen; /* length to unmap in file */
5157 xfs_fileoff_t max_len;
5158 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5160 struct xfs_iext_cursor icur;
5163 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5165 whichfork = xfs_bmapi_whichfork(flags);
5166 ASSERT(whichfork != XFS_COW_FORK);
5167 ifp = XFS_IFORK_PTR(ip, whichfork);
5169 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5170 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5171 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5173 return -EFSCORRUPTED;
5176 if (XFS_FORCED_SHUTDOWN(mp))
5179 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5184 * Guesstimate how many blocks we can unmap without running the risk of
5185 * blowing out the transaction with a mix of EFIs and reflink
5188 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5189 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5193 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5194 (error = xfs_iread_extents(tp, ip, whichfork)))
5196 if (xfs_iext_count(ifp) == 0) {
5200 XFS_STATS_INC(mp, xs_blk_unmap);
5201 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5204 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
5211 if (ifp->if_flags & XFS_IFBROOT) {
5212 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5213 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5214 cur->bc_private.b.firstblock = *firstblock;
5215 cur->bc_private.b.dfops = dfops;
5216 cur->bc_private.b.flags = 0;
5222 * Synchronize by locking the bitmap inode.
5224 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5225 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5226 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5227 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5231 while (end != (xfs_fileoff_t)-1 && end >= start &&
5232 (nexts == 0 || extno < nexts) && max_len > 0) {
5234 * Is the found extent after a hole in which end lives?
5235 * Just back up to the previous extent, if so.
5237 if (got.br_startoff > end &&
5238 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5243 * Is the last block of this extent before the range
5244 * we're supposed to delete? If so, we're done.
5246 end = XFS_FILEOFF_MIN(end,
5247 got.br_startoff + got.br_blockcount - 1);
5251 * Then deal with the (possibly delayed) allocated space
5255 wasdel = isnullstartblock(del.br_startblock);
5258 * Make sure we don't touch multiple AGF headers out of order
5259 * in a single transaction, as that could cause AB-BA deadlocks.
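 *
 * E.g. (illustrative): having already touched the AGF of AG 3 in this
 * transaction, we must not go on to lock the AGF of AG 1, so the check
 * below stops the unmap early and the unfinished range is reported back
 * through *rlen.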
5262 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5263 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5267 if (got.br_startoff < start) {
5268 del.br_startoff = start;
5269 del.br_blockcount -= start - got.br_startoff;
5271 del.br_startblock += start - got.br_startoff;
5273 if (del.br_startoff + del.br_blockcount > end + 1)
5274 del.br_blockcount = end + 1 - del.br_startoff;
5276 /* How much can we safely unmap? */
5277 if (max_len < del.br_blockcount) {
5278 del.br_startoff += del.br_blockcount - max_len;
5280 del.br_startblock += del.br_blockcount - max_len;
5281 del.br_blockcount = max_len;
5284 sum = del.br_startblock + del.br_blockcount;
5286 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5288 * Realtime extent not lined up at the end.
5289 * The extent could have been split into written
5290 * and unwritten pieces, or we could just be
5291 * unmapping part of it. But we can't really
5292 * get rid of part of a realtime extent.
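 *
 * Illustrative example (made-up geometry): with sb_rextsize = 4 and a
 * deletion whose startblock + blockcount works out to 10, mod is 2: the
 * range ends two blocks past a realtime extent boundary and cannot simply
 * be freed.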
5294 if (del.br_state == XFS_EXT_UNWRITTEN ||
5295 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5297 * This piece is unwritten, or we're not
5298 * using unwritten extents. Skip over it.
5301 end -= mod > del.br_blockcount ?
5302 del.br_blockcount : mod;
5303 if (end < got.br_startoff &&
5304 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5311 * It's written, turn it unwritten.
5312 * This is better than zeroing it.
5314 ASSERT(del.br_state == XFS_EXT_NORM);
5315 ASSERT(tp->t_blk_res > 0);
5317 * If this spans a realtime extent boundary,
5318 * chop it back to the start of the one we end at.
5320 if (del.br_blockcount > mod) {
5321 del.br_startoff += del.br_blockcount - mod;
5322 del.br_startblock += del.br_blockcount - mod;
5323 del.br_blockcount = mod;
5325 del.br_state = XFS_EXT_UNWRITTEN;
5326 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5327 whichfork, &icur, &cur, &del,
5328 firstblock, dfops, &logflags);
5333 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5335 * Realtime extent is lined up at the end but not
5336 * at the front. We'll get rid of full extents if
5339 mod = mp->m_sb.sb_rextsize - mod;
5340 if (del.br_blockcount > mod) {
5341 del.br_blockcount -= mod;
5342 del.br_startoff += mod;
5343 del.br_startblock += mod;
5344 } else if ((del.br_startoff == start &&
5345 (del.br_state == XFS_EXT_UNWRITTEN ||
5346 tp->t_blk_res == 0)) ||
5347 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5349 * Can't make it unwritten. There isn't
5350 * a full extent here so just skip it.
5352 ASSERT(end >= del.br_blockcount);
5353 end -= del.br_blockcount;
5354 if (got.br_startoff > end &&
5355 !xfs_iext_prev_extent(ifp, &icur, &got)) {
5360 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5361 struct xfs_bmbt_irec prev;
5364 * This one is already unwritten.
5365 * It must have a written left neighbor.
5366 * Unwrite the killed part of that one and
5369 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
5371 ASSERT(prev.br_state == XFS_EXT_NORM);
5372 ASSERT(!isnullstartblock(prev.br_startblock));
5373 ASSERT(del.br_startblock ==
5374 prev.br_startblock + prev.br_blockcount);
5375 if (prev.br_startoff < start) {
5376 mod = start - prev.br_startoff;
5377 prev.br_blockcount -= mod;
5378 prev.br_startblock += mod;
5379 prev.br_startoff = start;
5381 prev.br_state = XFS_EXT_UNWRITTEN;
5382 error = xfs_bmap_add_extent_unwritten_real(tp,
5383 ip, whichfork, &icur, &cur,
5384 &prev, firstblock, dfops,
5390 ASSERT(del.br_state == XFS_EXT_NORM);
5391 del.br_state = XFS_EXT_UNWRITTEN;
5392 error = xfs_bmap_add_extent_unwritten_real(tp,
5393 ip, whichfork, &icur, &cur,
5394 &del, firstblock, dfops,
5403 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
5406 error = xfs_bmap_del_extent_real(ip, tp, &icur, dfops,
5407 cur, &del, &tmp_logflags, whichfork,
5409 logflags |= tmp_logflags;
5415 max_len -= del.br_blockcount;
5416 end = del.br_startoff - 1;
5419 * If not done, go on to the next (previous) record.
5421 if (end != (xfs_fileoff_t)-1 && end >= start) {
5422 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5423 (got.br_startoff > end &&
5424 !xfs_iext_prev_extent(ifp, &icur, &got))) {
5431 if (done || end == (xfs_fileoff_t)-1 || end < start)
5434 *rlen = end - start + 1;
5437 * Convert to a btree if necessary.
5439 if (xfs_bmap_needs_btree(ip, whichfork)) {
5440 ASSERT(cur == NULL);
5441 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5442 &cur, 0, &tmp_logflags, whichfork);
5443 logflags |= tmp_logflags;
5448 * transform from btree to extents, give it cur
5450 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5451 ASSERT(cur != NULL);
5452 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5454 logflags |= tmp_logflags;
5459 * transform from extents to local?
5464 * Log everything. Do this after conversion, there's no point in
5465 * logging the extent records if we've converted to btree format.
5467 if ((logflags & xfs_ilog_fext(whichfork)) &&
5468 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5469 logflags &= ~xfs_ilog_fext(whichfork);
5470 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5471 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5472 logflags &= ~xfs_ilog_fbroot(whichfork);
5474 * Log the inode even in the error case; if the transaction
5475 * is dirty we'll need to shut down the filesystem.
5478 xfs_trans_log_inode(tp, ip, logflags);
5481 *firstblock = cur->bc_private.b.firstblock;
5482 cur->bc_private.b.allocated = 0;
5484 xfs_btree_del_cursor(cur,
5485 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5490 /* Unmap a range of a file. */
5494 struct xfs_inode *ip,
5499 xfs_fsblock_t *firstblock,
5500 struct xfs_defer_ops *dfops,
5505 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5512 * Determine whether an extent shift can be accomplished by a merge with the
5513 * extent that precedes the target hole of the shift.
5517 struct xfs_bmbt_irec *left, /* preceding extent */
5518 struct xfs_bmbt_irec *got, /* current extent to shift */
5519 xfs_fileoff_t shift) /* shift fsb */
5521 xfs_fileoff_t startoff;
5523 startoff = got->br_startoff - shift;
5526 * The extent, once shifted, must be adjacent in-file and on-disk with
5527 * the preceding extent.
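 *
 * For example (illustrative values): a left extent covering file offsets
 * [0, 10) at disk block 100 and got covering [15, 20) at disk block 110
 * can absorb a shift of 5 blocks, because the shifted start offset
 * (15 - 5) meets the end of left both in the file and on disk.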
5529 if ((left->br_startoff + left->br_blockcount != startoff) ||
5530 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5531 (left->br_state != got->br_state) ||
5532 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5539 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5540 * hole in the file. If an extent shift would result in the extent being fully
5541 * adjacent to the extent that currently precedes the hole, we can merge with
5542 * the preceding extent rather than do the shift.
5544 * This function assumes the caller has verified a shift-by-merge is possible
5545 * with the provided extents via xfs_bmse_can_merge().
5549 struct xfs_inode *ip,
5551 xfs_fileoff_t shift, /* shift fsb */
5552 struct xfs_iext_cursor *icur,
5553 struct xfs_bmbt_irec *got, /* extent to shift */
5554 struct xfs_bmbt_irec *left, /* preceding extent */
5555 struct xfs_btree_cur *cur,
5556 int *logflags, /* output */
5557 struct xfs_defer_ops *dfops)
5559 struct xfs_bmbt_irec new;
5560 xfs_filblks_t blockcount;
5562 struct xfs_mount *mp = ip->i_mount;
5564 blockcount = left->br_blockcount + got->br_blockcount;
5566 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5567 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5568 ASSERT(xfs_bmse_can_merge(left, got, shift));
5571 new.br_blockcount = blockcount;
5574 * Update the on-disk extent count, the btree if necessary and log the
5577 XFS_IFORK_NEXT_SET(ip, whichfork,
5578 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5579 *logflags |= XFS_ILOG_CORE;
5581 *logflags |= XFS_ILOG_DEXT;
5585 /* lookup and remove the extent to merge */
5586 error = xfs_bmbt_lookup_eq(cur, got, &i);
5589 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5591 error = xfs_btree_delete(cur, &i);
5594 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5596 /* lookup and update size of the previous extent */
5597 error = xfs_bmbt_lookup_eq(cur, left, &i);
5600 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5602 error = xfs_bmbt_update(cur, &new);
5607 xfs_iext_remove(ip, icur, 0);
5608 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5609 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5612 /* update reverse mapping. rmap functions merge the rmaps for us */
5613 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
5616 memcpy(&new, got, sizeof(new));
5617 new.br_startoff = left->br_startoff + left->br_blockcount;
5618 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
5622 xfs_bmap_shift_update_extent(
5623 struct xfs_inode *ip,
5625 struct xfs_iext_cursor *icur,
5626 struct xfs_bmbt_irec *got,
5627 struct xfs_btree_cur *cur,
5629 struct xfs_defer_ops *dfops,
5630 xfs_fileoff_t startoff)
5632 struct xfs_mount *mp = ip->i_mount;
5633 struct xfs_bmbt_irec prev = *got;
5636 *logflags |= XFS_ILOG_CORE;
5638 got->br_startoff = startoff;
5641 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
5644 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5646 error = xfs_bmbt_update(cur, got);
5650 *logflags |= XFS_ILOG_DEXT;
5653 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5656 /* update reverse mapping */
5657 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &prev);
5660 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, got);
5664 xfs_bmap_collapse_extents(
5665 struct xfs_trans *tp,
5666 struct xfs_inode *ip,
5667 xfs_fileoff_t *next_fsb,
5668 xfs_fileoff_t offset_shift_fsb,
5670 xfs_fsblock_t *firstblock,
5671 struct xfs_defer_ops *dfops)
5673 int whichfork = XFS_DATA_FORK;
5674 struct xfs_mount *mp = ip->i_mount;
5675 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5676 struct xfs_btree_cur *cur = NULL;
5677 struct xfs_bmbt_irec got, prev;
5678 struct xfs_iext_cursor icur;
5679 xfs_fileoff_t new_startoff;
5683 if (unlikely(XFS_TEST_ERROR(
5684 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5685 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5686 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5687 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5688 return -EFSCORRUPTED;
5691 if (XFS_FORCED_SHUTDOWN(mp))
5694 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5696 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5697 error = xfs_iread_extents(tp, ip, whichfork);
5702 if (ifp->if_flags & XFS_IFBROOT) {
5703 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5704 cur->bc_private.b.firstblock = *firstblock;
5705 cur->bc_private.b.dfops = dfops;
5706 cur->bc_private.b.flags = 0;
5709 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5713 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5716 new_startoff = got.br_startoff - offset_shift_fsb;
5717 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5718 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
5723 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5724 error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5725 &icur, &got, &prev, cur, &logflags,
5732 if (got.br_startoff < offset_shift_fsb) {
5738 error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
5739 &logflags, dfops, new_startoff);
5744 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
5749 *next_fsb = got.br_startoff;
5752 xfs_btree_del_cursor(cur,
5753 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5755 xfs_trans_log_inode(tp, ip, logflags);
5760 xfs_bmap_insert_extents(
5761 struct xfs_trans *tp,
5762 struct xfs_inode *ip,
5763 xfs_fileoff_t *next_fsb,
5764 xfs_fileoff_t offset_shift_fsb,
5766 xfs_fileoff_t stop_fsb,
5767 xfs_fsblock_t *firstblock,
5768 struct xfs_defer_ops *dfops)
5770 int whichfork = XFS_DATA_FORK;
5771 struct xfs_mount *mp = ip->i_mount;
5772 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5773 struct xfs_btree_cur *cur = NULL;
5774 struct xfs_bmbt_irec got, next;
5775 struct xfs_iext_cursor icur;
5776 xfs_fileoff_t new_startoff;
5780 if (unlikely(XFS_TEST_ERROR(
5781 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5782 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5783 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5784 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5785 return -EFSCORRUPTED;
5788 if (XFS_FORCED_SHUTDOWN(mp))
5791 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5793 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5794 error = xfs_iread_extents(tp, ip, whichfork);
5799 if (ifp->if_flags & XFS_IFBROOT) {
5800 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5801 cur->bc_private.b.firstblock = *firstblock;
5802 cur->bc_private.b.dfops = dfops;
5803 cur->bc_private.b.flags = 0;
5806 if (*next_fsb == NULLFSBLOCK) {
5807 xfs_iext_last(ifp, &icur);
5808 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5809 stop_fsb > got.br_startoff) {
5814 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
5819 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
5822 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
5827 new_startoff = got.br_startoff + offset_shift_fsb;
5828 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5829 if (new_startoff + got.br_blockcount > next.br_startoff) {
5835 * Unlike a left shift (which involves a hole punch), a right
5836 * shift does not modify extent neighbors in any way. We should
5837 * never find mergeable extents in this scenario. Check anyway
5838 * and warn if we encounter two extents that could be one.
5840 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
5844 error = xfs_bmap_shift_update_extent(ip, whichfork, &icur, &got, cur,
5845 &logflags, dfops, new_startoff);
5849 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5850 stop_fsb >= got.br_startoff + got.br_blockcount) {
5855 *next_fsb = got.br_startoff;
5858 xfs_btree_del_cursor(cur,
5859 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5861 xfs_trans_log_inode(tp, ip, logflags);
5866 * Splits an extent into two extents at split_fsb, such that split_fsb becomes
5867 * the first block of the second extent. @ext is the target extent to be split.
5868 * @split_fsb is the block at which the extent is split. If split_fsb lies in a
5869 * hole or at the first block of an extent, just return 0.
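 *
 * For example (illustrative values): splitting an extent covering file
 * offsets [10, 20) at split_fsb = 14 trims the original record to [10, 14)
 * and inserts a new record for [14, 20) that begins four blocks further
 * into the original on-disk allocation.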
5872 xfs_bmap_split_extent_at(
5873 struct xfs_trans *tp,
5874 struct xfs_inode *ip,
5875 xfs_fileoff_t split_fsb,
5876 xfs_fsblock_t *firstfsb,
5877 struct xfs_defer_ops *dfops)
5879 int whichfork = XFS_DATA_FORK;
5880 struct xfs_btree_cur *cur = NULL;
5881 struct xfs_bmbt_irec got;
5882 struct xfs_bmbt_irec new; /* split extent */
5883 struct xfs_mount *mp = ip->i_mount;
5884 struct xfs_ifork *ifp;
5885 xfs_fsblock_t gotblkcnt; /* new block count for got */
5886 struct xfs_iext_cursor icur;
5891 if (unlikely(XFS_TEST_ERROR(
5892 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5893 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5894 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5895 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5896 XFS_ERRLEVEL_LOW, mp);
5897 return -EFSCORRUPTED;
5900 if (XFS_FORCED_SHUTDOWN(mp))
5903 ifp = XFS_IFORK_PTR(ip, whichfork);
5904 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5905 /* Read in all the extents */
5906 error = xfs_iread_extents(tp, ip, whichfork);
5912 * If there are no extents, or split_fsb lies in a hole, we are done.
5914 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5915 got.br_startoff >= split_fsb)
5918 gotblkcnt = split_fsb - got.br_startoff;
5919 new.br_startoff = split_fsb;
5920 new.br_startblock = got.br_startblock + gotblkcnt;
5921 new.br_blockcount = got.br_blockcount - gotblkcnt;
5922 new.br_state = got.br_state;
5924 if (ifp->if_flags & XFS_IFBROOT) {
5925 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5926 cur->bc_private.b.firstblock = *firstfsb;
5927 cur->bc_private.b.dfops = dfops;
5928 cur->bc_private.b.flags = 0;
5929 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5932 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5935 got.br_blockcount = gotblkcnt;
5936 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5939 logflags = XFS_ILOG_CORE;
5941 error = xfs_bmbt_update(cur, &got);
5945 logflags |= XFS_ILOG_DEXT;
5947 /* Add new extent */
5948 xfs_iext_next(ifp, &icur);
5949 xfs_iext_insert(ip, &icur, &new, 0);
5950 XFS_IFORK_NEXT_SET(ip, whichfork,
5951 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5954 error = xfs_bmbt_lookup_eq(cur, &new, &i);
5957 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5958 error = xfs_btree_insert(cur, &i);
5961 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5965 * Convert to a btree if necessary.
5967 if (xfs_bmap_needs_btree(ip, whichfork)) {
5968 int tmp_logflags; /* partial log flag return val */
5970 ASSERT(cur == NULL);
5971 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
5972 &cur, 0, &tmp_logflags, whichfork);
5973 logflags |= tmp_logflags;
5978 cur->bc_private.b.allocated = 0;
5979 xfs_btree_del_cursor(cur,
5980 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5984 xfs_trans_log_inode(tp, ip, logflags);
5989 xfs_bmap_split_extent(
5990 struct xfs_inode *ip,
5991 xfs_fileoff_t split_fsb)
5993 struct xfs_mount *mp = ip->i_mount;
5994 struct xfs_trans *tp;
5995 struct xfs_defer_ops dfops;
5996 xfs_fsblock_t firstfsb;
5999 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6000 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6004 xfs_ilock(ip, XFS_ILOCK_EXCL);
6005 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6007 xfs_defer_init(&dfops, &firstfsb);
6009 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6014 error = xfs_defer_finish(&tp, &dfops);
6018 return xfs_trans_commit(tp);
6021 xfs_defer_cancel(&dfops);
6022 xfs_trans_cancel(tp);
6026 /* Deferred mapping is only for real extents in the data fork. */
6028 xfs_bmap_is_update_needed(
6029 struct xfs_bmbt_irec *bmap)
6031 return bmap->br_startblock != HOLESTARTBLOCK &&
6032 bmap->br_startblock != DELAYSTARTBLOCK;
6035 /* Record a bmap intent. */
6038 struct xfs_mount *mp,
6039 struct xfs_defer_ops *dfops,
6040 enum xfs_bmap_intent_type type,
6041 struct xfs_inode *ip,
6043 struct xfs_bmbt_irec *bmap)
6046 struct xfs_bmap_intent *bi;
6048 trace_xfs_bmap_defer(mp,
6049 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6051 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6052 ip->i_ino, whichfork,
6054 bmap->br_blockcount,
6057 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6058 INIT_LIST_HEAD(&bi->bi_list);
6061 bi->bi_whichfork = whichfork;
6062 bi->bi_bmap = *bmap;
6064 error = xfs_defer_ijoin(dfops, bi->bi_owner);
6070 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6074 /* Map an extent into a file. */
6076 xfs_bmap_map_extent(
6077 struct xfs_mount *mp,
6078 struct xfs_defer_ops *dfops,
6079 struct xfs_inode *ip,
6080 struct xfs_bmbt_irec *PREV)
6082 if (!xfs_bmap_is_update_needed(PREV))
6085 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6086 XFS_DATA_FORK, PREV);
6089 /* Unmap an extent out of a file. */
6091 xfs_bmap_unmap_extent(
6092 struct xfs_mount *mp,
6093 struct xfs_defer_ops *dfops,
6094 struct xfs_inode *ip,
6095 struct xfs_bmbt_irec *PREV)
6097 if (!xfs_bmap_is_update_needed(PREV))
6100 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6101 XFS_DATA_FORK, PREV);
6105 * Process one of the deferred bmap operations. We pass back the
6106 * btree cursor to maintain our lock on the bmapbt between calls.
6109 xfs_bmap_finish_one(
6110 struct xfs_trans *tp,
6111 struct xfs_defer_ops *dfops,
6112 struct xfs_inode *ip,
6113 enum xfs_bmap_intent_type type,
6115 xfs_fileoff_t startoff,
6116 xfs_fsblock_t startblock,
6117 xfs_filblks_t *blockcount,
6120 xfs_fsblock_t firstfsb;
6124 * firstfsb is tied to the transaction lifetime and is used to
6125 * ensure correct AG locking order and schedule work item
6126 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6127 * to only making one bmap call per transaction, so it should
6128 * be safe to have it as a local variable here.
6130 firstfsb = NULLFSBLOCK;
6132 trace_xfs_bmap_deferred(tp->t_mountp,
6133 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6134 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6135 ip->i_ino, whichfork, startoff, *blockcount, state);
6137 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6138 return -EFSCORRUPTED;
6140 if (XFS_TEST_ERROR(false, tp->t_mountp,
6141 XFS_ERRTAG_BMAP_FINISH_ONE))
6146 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6150 case XFS_BMAP_UNMAP:
6151 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6152 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6156 error = -EFSCORRUPTED;
6162 /* Check that an inode's extent does not have invalid flags or bad ranges. */
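/*
 * Typical usage (a sketch; variable names here are hypothetical): callers
 * treat a non-NULL failure address as evidence of a corrupt mapping, e.g.
 *
 *	fa = xfs_bmap_validate_extent(ip, whichfork, &irec);
 *	if (fa)
 *		... flag the inode as corrupt / fail the verifier ...
 */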
6164 xfs_bmap_validate_extent(
6165 struct xfs_inode *ip,
6167 struct xfs_bmbt_irec *irec)
6169 struct xfs_mount *mp = ip->i_mount;
6170 xfs_fsblock_t endfsb;
6173 isrt = XFS_IS_REALTIME_INODE(ip);
6174 endfsb = irec->br_startblock + irec->br_blockcount - 1;
6176 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6177 return __this_address;
6178 if (!xfs_verify_rtbno(mp, endfsb))
6179 return __this_address;
6181 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6182 return __this_address;
6183 if (!xfs_verify_fsbno(mp, endfsb))
6184 return __this_address;
6185 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6186 XFS_FSB_TO_AGNO(mp, endfsb))
6187 return __this_address;
6189 if (irec->br_state != XFS_EXT_NORM) {
6190 if (whichfork != XFS_DATA_FORK)
6191 return __this_address;
6192 if (!xfs_sb_version_hasextflgbit(&mp->m_sb))
6193 return __this_address;