2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_format.h"
28 #include "xfs_da_btree.h"
30 #include "xfs_inode.h"
31 #include "xfs_btree.h"
32 #include "xfs_trans.h"
33 #include "xfs_inode_item.h"
34 #include "xfs_extfree_item.h"
35 #include "xfs_alloc.h"
37 #include "xfs_bmap_util.h"
38 #include "xfs_bmap_btree.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_error.h"
41 #include "xfs_quota.h"
42 #include "xfs_trans_space.h"
43 #include "xfs_buf_item.h"
44 #include "xfs_trace.h"
45 #include "xfs_symlink.h"
46 #include "xfs_attr_leaf.h"
47 #include "xfs_filestream.h"
50 kmem_zone_t *xfs_bmap_free_item_zone;
53 * Miscellaneous helper functions
57 * Compute and fill in the value of the maximum depth of a bmap btree
58 * in this filesystem. Done once, during mount.
61 xfs_bmap_compute_maxlevels(
62 xfs_mount_t *mp, /* file system mount structure */
63 int whichfork) /* data or attr fork */
65 int level; /* btree level */
66 uint maxblocks; /* max blocks at this level */
67 uint maxleafents; /* max leaf entries possible */
68 int maxrootrecs; /* max records in root block */
69 int minleafrecs; /* min records in leaf block */
70 int minnoderecs; /* min records in node block */
71 int sz; /* root block size */
74 * The maximum number of extents in a file, hence the maximum
75 * number of leaf entries, is controlled by the type of di_nextents
76 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
77 * (a signed 16-bit number, xfs_aextnum_t).
79 * Note that we can no longer assume that if we are in ATTR1 that
80 * the fork offset of all the inodes will be
81 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
82 * with ATTR2 and then mounted back with ATTR1, keeping the
83 * di_forkoff's fixed but probably at various positions. Therefore,
84 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
85 * of a minimum size available.
87 if (whichfork == XFS_DATA_FORK) {
88 maxleafents = MAXEXTNUM;
89 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
91 maxleafents = MAXAEXTNUM;
92 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
94 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
95 minleafrecs = mp->m_bmap_dmnr[0];
96 minnoderecs = mp->m_bmap_dmnr[1];
97 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
98 for (level = 1; maxblocks > 1; level++) {
99 if (maxblocks <= maxrootrecs)
102 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
104 mp->m_bm_maxlevels[whichfork] = level;
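/*
 * Illustrative note on the maxlevels computation above (the per-block record
 * counts below are hypothetical, not taken from any particular filesystem
 * geometry): with minleafrecs = 125 and minnoderecs = 250, covering
 * MAXEXTNUM (2^31 - 1) leaf entries needs roughly 17.2 million leaf blocks,
 * and each further level divides the block count by ~250, so the loop
 * settles on a small single-digit level count for the data fork.
 */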
107 STATIC int /* error */
109 struct xfs_btree_cur *cur,
113 int *stat) /* success/failure */
115 cur->bc_rec.b.br_startoff = off;
116 cur->bc_rec.b.br_startblock = bno;
117 cur->bc_rec.b.br_blockcount = len;
118 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
121 STATIC int /* error */
123 struct xfs_btree_cur *cur,
127 int *stat) /* success/failure */
129 cur->bc_rec.b.br_startoff = off;
130 cur->bc_rec.b.br_startblock = bno;
131 cur->bc_rec.b.br_blockcount = len;
132 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
136 * Check if the inode needs to be converted to btree format.
138 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
140 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
141 XFS_IFORK_NEXTENTS(ip, whichfork) >
142 XFS_IFORK_MAXEXT(ip, whichfork);
146 * Check if the inode should be converted to extent format.
148 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
150 return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
151 XFS_IFORK_NEXTENTS(ip, whichfork) <=
152 XFS_IFORK_MAXEXT(ip, whichfork);
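/*
 * Note that the two checks above are complementary: an extents-format fork
 * whose extent count grows past XFS_IFORK_MAXEXT should be converted to
 * btree format, while a btree-format fork whose count drops back to
 * XFS_IFORK_MAXEXT or fewer can be converted back to extents format.
 */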
156 * Update the record referred to by cur to the value given
157 * by [off, bno, len, state].
158 * This either works (return 0) or gets an EFSCORRUPTED error.
162 struct xfs_btree_cur *cur,
168 union xfs_btree_rec rec;
170 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
171 return xfs_btree_update(cur, &rec);
175 * Compute the worst-case number of indirect blocks that will be used
176 * for ip's delayed extent of length "len".
179 xfs_bmap_worst_indlen(
180 xfs_inode_t *ip, /* incore inode pointer */
181 xfs_filblks_t len) /* delayed extent length */
183 int level; /* btree level number */
184 int maxrecs; /* maximum record count at this level */
185 xfs_mount_t *mp; /* mount structure */
186 xfs_filblks_t rval; /* return value */
189 maxrecs = mp->m_bmap_dmxr[0];
190 for (level = 0, rval = 0;
191 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
194 do_div(len, maxrecs);
197 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
200 maxrecs = mp->m_bmap_dmxr[1];
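/*
 * Rough intuition for the worst-case calculation above (numbers
 * hypothetical): each iteration divides the remaining length by the
 * per-block record capacity and accumulates that many blocks, so a delayed
 * extent of a few thousand blocks typically costs only a handful of
 * indirect (bmap btree) blocks in the worst case.
 */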
206 * Calculate the default attribute fork offset for newly created inodes.
209 xfs_default_attroffset(
210 struct xfs_inode *ip)
212 struct xfs_mount *mp = ip->i_mount;
215 if (mp->m_sb.sb_inodesize == 256) {
216 offset = XFS_LITINO(mp, ip->i_d.di_version) -
217 XFS_BMDR_SPACE_CALC(MINABTPTRS);
219 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
222 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
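/*
 * Note: the 256 byte inode case above leaves only enough room past the
 * default offset for a minimal attr fork btree root, since such inodes
 * have very little literal area to split between the two forks.
 */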
227 * Helper routine to reset inode di_forkoff field when switching
228 * attribute fork from local to extent format - we reset it where
229 * possible to make space available for inline data fork extents.
232 xfs_bmap_forkoff_reset(
236 if (whichfork == XFS_ATTR_FORK &&
237 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
238 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
239 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
240 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
242 if (dfl_forkoff > ip->i_d.di_forkoff)
243 ip->i_d.di_forkoff = dfl_forkoff;
248 STATIC struct xfs_buf *
250 struct xfs_btree_cur *cur,
253 struct xfs_log_item_desc *lidp;
259 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
260 if (!cur->bc_bufs[i])
262 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
263 return cur->bc_bufs[i];
266 /* Chase down all the log items to see if the bp is there */
267 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
268 struct xfs_buf_log_item *bip;
269 bip = (struct xfs_buf_log_item *)lidp->lid_item;
270 if (bip->bli_item.li_type == XFS_LI_BUF &&
271 XFS_BUF_ADDR(bip->bli_buf) == bno)
280 struct xfs_btree_block *block,
286 __be64 *pp, *thispa; /* pointer to block address */
287 xfs_bmbt_key_t *prevp, *keyp;
289 ASSERT(be16_to_cpu(block->bb_level) > 0);
292 for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
293 dmxr = mp->m_bmap_dmxr[0];
294 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
297 ASSERT(be64_to_cpu(prevp->br_startoff) <
298 be64_to_cpu(keyp->br_startoff));
303 * Compare the block numbers to see if there are dups.
306 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
308 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
310 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
312 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
314 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
315 if (*thispa == *pp) {
316 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
318 (unsigned long long)be64_to_cpu(*thispa));
319 panic("%s: ptrs are equal in node\n",
327 * Check that the extents for the inode ip are in the right order in all
328 * btree leaves. This becomes prohibitively expensive for large extent count
329 * files, so don't bother with inodes that have more than 10,000 extents in
330 * them. The btree record ordering checks will still be done, so for such large
331 * bmapbt constructs those checks will catch most corruptions.
334 xfs_bmap_check_leaf_extents(
335 xfs_btree_cur_t *cur, /* btree cursor or null */
336 xfs_inode_t *ip, /* incore inode pointer */
337 int whichfork) /* data or attr fork */
339 struct xfs_btree_block *block; /* current btree block */
340 xfs_fsblock_t bno; /* block # of "block" */
341 xfs_buf_t *bp; /* buffer for "block" */
342 int error; /* error return value */
343 xfs_extnum_t i=0, j; /* index into the extents list */
344 xfs_ifork_t *ifp; /* fork structure */
345 int level; /* btree level, for checking */
346 xfs_mount_t *mp; /* file system mount structure */
347 __be64 *pp; /* pointer to block address */
348 xfs_bmbt_rec_t *ep; /* pointer to current extent */
349 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
350 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
353 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
357 /* skip large extent count inodes */
358 if (ip->i_d.di_nextents > 10000)
363 ifp = XFS_IFORK_PTR(ip, whichfork);
364 block = ifp->if_broot;
366 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
368 level = be16_to_cpu(block->bb_level);
370 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
371 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
372 bno = be64_to_cpu(*pp);
374 ASSERT(bno != NULLFSBLOCK);
375 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
376 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
379 * Go down the tree until leaf level is reached, following the first
380 * pointer (leftmost) at each level.
382 while (level-- > 0) {
383 /* See if buf is in cur first */
385 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
388 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
394 block = XFS_BUF_TO_BLOCK(bp);
399 * Check this block for basic sanity (increasing keys and
400 * no duplicate blocks).
403 xfs_check_block(block, mp, 0, 0);
404 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
405 bno = be64_to_cpu(*pp);
406 XFS_WANT_CORRUPTED_GOTO(mp,
407 XFS_FSB_SANITY_CHECK(mp, bno), error0);
410 xfs_trans_brelse(NULL, bp);
415 * Here with bp and block set to the leftmost leaf node in the tree.
420 * Loop over all leaf nodes checking that all extents are in the right order.
423 xfs_fsblock_t nextbno;
424 xfs_extnum_t num_recs;
427 num_recs = xfs_btree_get_numrecs(block);
430 * Read-ahead the next leaf block, if any.
433 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
436 * Check all the extents to make sure they are OK.
437 * If we had a previous block, the last entry should
438 * conform with the first entry in this one.
441 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
443 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
444 xfs_bmbt_disk_get_blockcount(&last) <=
445 xfs_bmbt_disk_get_startoff(ep));
447 for (j = 1; j < num_recs; j++) {
448 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
449 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
450 xfs_bmbt_disk_get_blockcount(ep) <=
451 xfs_bmbt_disk_get_startoff(nextp));
459 xfs_trans_brelse(NULL, bp);
463 * If we've reached the end, stop.
465 if (bno == NULLFSBLOCK)
469 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
472 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
478 block = XFS_BUF_TO_BLOCK(bp);
484 xfs_warn(mp, "%s: at error0", __func__);
486 xfs_trans_brelse(NULL, bp);
488 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
490 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
495 * Add bmap trace insert entries for all the contents of the extent records.
498 xfs_bmap_trace_exlist(
499 xfs_inode_t *ip, /* incore inode pointer */
500 xfs_extnum_t cnt, /* count of entries in the list */
501 int whichfork, /* data or attr fork */
502 unsigned long caller_ip)
504 xfs_extnum_t idx; /* extent record index */
505 xfs_ifork_t *ifp; /* inode fork pointer */
508 if (whichfork == XFS_ATTR_FORK)
509 state |= BMAP_ATTRFORK;
511 ifp = XFS_IFORK_PTR(ip, whichfork);
512 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
513 for (idx = 0; idx < cnt; idx++)
514 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
518 * Validate that the bmbt_irecs being returned from bmapi are valid
519 * given the caller's original parameters. Specifically check the
520 * ranges of the returned irecs to ensure that they only extend beyond
521 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
524 xfs_bmap_validate_ret(
528 xfs_bmbt_irec_t *mval,
532 int i; /* index to map values */
534 ASSERT(ret_nmap <= nmap);
536 for (i = 0; i < ret_nmap; i++) {
537 ASSERT(mval[i].br_blockcount > 0);
538 if (!(flags & XFS_BMAPI_ENTIRE)) {
539 ASSERT(mval[i].br_startoff >= bno);
540 ASSERT(mval[i].br_blockcount <= len);
541 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
544 ASSERT(mval[i].br_startoff < bno + len);
545 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
549 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
550 mval[i].br_startoff);
551 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
552 mval[i].br_startblock != HOLESTARTBLOCK);
553 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
554 mval[i].br_state == XFS_EXT_UNWRITTEN);
559 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
560 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
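/*
 * When these stub macros are in effect (presumably the build without the
 * extent-checking debug code), the calls elsewhere in this file expand to
 * nothing and impose no runtime cost.
 */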
564 * bmap free list manipulation functions
568 * Add the extent to the list of extents to be free at transaction end.
569 * The list is maintained sorted (by block number).
573 xfs_fsblock_t bno, /* fs block number of extent */
574 xfs_filblks_t len, /* length of extent */
575 xfs_bmap_free_t *flist, /* list of extents */
576 xfs_mount_t *mp) /* mount point structure */
578 xfs_bmap_free_item_t *cur; /* current (next) element */
579 xfs_bmap_free_item_t *new; /* new element */
580 xfs_bmap_free_item_t *prev; /* previous element */
585 ASSERT(bno != NULLFSBLOCK);
587 ASSERT(len <= MAXEXTLEN);
588 ASSERT(!isnullstartblock(bno));
589 agno = XFS_FSB_TO_AGNO(mp, bno);
590 agbno = XFS_FSB_TO_AGBNO(mp, bno);
591 ASSERT(agno < mp->m_sb.sb_agcount);
592 ASSERT(agbno < mp->m_sb.sb_agblocks);
593 ASSERT(len < mp->m_sb.sb_agblocks);
594 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
596 ASSERT(xfs_bmap_free_item_zone != NULL);
597 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
598 new->xbfi_startblock = bno;
599 new->xbfi_blockcount = (xfs_extlen_t)len;
600 for (prev = NULL, cur = flist->xbf_first;
602 prev = cur, cur = cur->xbfi_next) {
603 if (cur->xbfi_startblock >= bno)
607 prev->xbfi_next = new;
609 flist->xbf_first = new;
610 new->xbfi_next = cur;
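/*
 * For illustration (block numbers hypothetical): if the list already holds
 * extents starting at blocks 10 and 40, a new extent starting at block 25
 * is linked in between them, so the list stays sorted by xbfi_startblock.
 */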
615 * Remove the entry "free" from the free item list. Prev points to the
616 * previous entry, unless "free" is the head of the list.
620 xfs_bmap_free_t *flist, /* free item list header */
621 xfs_bmap_free_item_t *prev, /* previous item on list, if any */
622 xfs_bmap_free_item_t *free) /* list item to be freed */
625 prev->xbfi_next = free->xbfi_next;
627 flist->xbf_first = free->xbfi_next;
629 kmem_zone_free(xfs_bmap_free_item_zone, free);
633 * Free up any items left in the list.
637 xfs_bmap_free_t *flist) /* list of bmap_free_items */
639 xfs_bmap_free_item_t *free; /* free list item */
640 xfs_bmap_free_item_t *next;
642 if (flist->xbf_count == 0)
644 ASSERT(flist->xbf_first != NULL);
645 for (free = flist->xbf_first; free; free = next) {
646 next = free->xbfi_next;
647 xfs_bmap_del_free(flist, NULL, free);
649 ASSERT(flist->xbf_count == 0);
653 * Inode fork format manipulation functions
657 * Transform a btree format file with only one leaf node, where the
658 * extents list will fit in the inode, into an extents format file.
659 * Since the file extents are already in-core, all we have to do is
660 * give up the space for the btree root and pitch the leaf block.
662 STATIC int /* error */
663 xfs_bmap_btree_to_extents(
664 xfs_trans_t *tp, /* transaction pointer */
665 xfs_inode_t *ip, /* incore inode pointer */
666 xfs_btree_cur_t *cur, /* btree cursor */
667 int *logflagsp, /* inode logging flags */
668 int whichfork) /* data or attr fork */
671 struct xfs_btree_block *cblock;/* child btree block */
672 xfs_fsblock_t cbno; /* child block number */
673 xfs_buf_t *cbp; /* child block's buffer */
674 int error; /* error return value */
675 xfs_ifork_t *ifp; /* inode fork data */
676 xfs_mount_t *mp; /* mount point structure */
677 __be64 *pp; /* ptr to block address */
678 struct xfs_btree_block *rblock;/* root btree block */
681 ifp = XFS_IFORK_PTR(ip, whichfork);
682 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
683 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
684 rblock = ifp->if_broot;
685 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
686 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
687 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
688 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
689 cbno = be64_to_cpu(*pp);
692 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
695 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
699 cblock = XFS_BUF_TO_BLOCK(cbp);
700 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
702 xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
703 ip->i_d.di_nblocks--;
704 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
705 xfs_trans_binval(tp, cbp);
706 if (cur->bc_bufs[0] == cbp)
707 cur->bc_bufs[0] = NULL;
708 xfs_iroot_realloc(ip, -1, whichfork);
709 ASSERT(ifp->if_broot == NULL);
710 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
711 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
712 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
717 * Convert an extents-format file into a btree-format file.
718 * The new file will have a root block (in the inode) and a single child block.
720 STATIC int /* error */
721 xfs_bmap_extents_to_btree(
722 xfs_trans_t *tp, /* transaction pointer */
723 xfs_inode_t *ip, /* incore inode pointer */
724 xfs_fsblock_t *firstblock, /* first-block-allocated */
725 xfs_bmap_free_t *flist, /* blocks freed in xaction */
726 xfs_btree_cur_t **curp, /* cursor returned to caller */
727 int wasdel, /* converting a delayed alloc */
728 int *logflagsp, /* inode logging flags */
729 int whichfork) /* data or attr fork */
731 struct xfs_btree_block *ablock; /* allocated (child) bt block */
732 xfs_buf_t *abp; /* buffer for ablock */
733 xfs_alloc_arg_t args; /* allocation arguments */
734 xfs_bmbt_rec_t *arp; /* child record pointer */
735 struct xfs_btree_block *block; /* btree root block */
736 xfs_btree_cur_t *cur; /* bmap btree cursor */
737 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
738 int error; /* error return value */
739 xfs_extnum_t i, cnt; /* extent record index */
740 xfs_ifork_t *ifp; /* inode fork pointer */
741 xfs_bmbt_key_t *kp; /* root block key pointer */
742 xfs_mount_t *mp; /* mount structure */
743 xfs_extnum_t nextents; /* number of file extents */
744 xfs_bmbt_ptr_t *pp; /* root block address pointer */
747 ifp = XFS_IFORK_PTR(ip, whichfork);
748 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
751 * Make space in the inode incore.
753 xfs_iroot_realloc(ip, 1, whichfork);
754 ifp->if_flags |= XFS_IFBROOT;
759 block = ifp->if_broot;
760 if (xfs_sb_version_hascrc(&mp->m_sb))
761 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
762 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
763 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
765 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
766 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
767 XFS_BTREE_LONG_PTRS);
770 * Need a cursor. Can't allocate until bb_level is filled in.
772 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
773 cur->bc_private.b.firstblock = *firstblock;
774 cur->bc_private.b.flist = flist;
775 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
777 * Convert to a btree with two levels, one record in root.
779 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
780 memset(&args, 0, sizeof(args));
783 args.firstblock = *firstblock;
784 if (*firstblock == NULLFSBLOCK) {
785 args.type = XFS_ALLOCTYPE_START_BNO;
786 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
787 } else if (flist->xbf_low) {
788 args.type = XFS_ALLOCTYPE_START_BNO;
789 args.fsbno = *firstblock;
791 args.type = XFS_ALLOCTYPE_NEAR_BNO;
792 args.fsbno = *firstblock;
794 args.minlen = args.maxlen = args.prod = 1;
795 args.wasdel = wasdel;
797 if ((error = xfs_alloc_vextent(&args))) {
798 xfs_iroot_realloc(ip, -1, whichfork);
799 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
803 * Allocation can't fail, the space was reserved.
805 ASSERT(args.fsbno != NULLFSBLOCK);
806 ASSERT(*firstblock == NULLFSBLOCK ||
807 args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
809 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
810 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
811 cur->bc_private.b.allocated++;
812 ip->i_d.di_nblocks++;
813 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
814 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
816 * Fill in the child block.
818 abp->b_ops = &xfs_bmbt_buf_ops;
819 ablock = XFS_BUF_TO_BLOCK(abp);
820 if (xfs_sb_version_hascrc(&mp->m_sb))
821 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
822 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
823 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
825 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
826 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
827 XFS_BTREE_LONG_PTRS);
829 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
830 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
831 for (cnt = i = 0; i < nextents; i++) {
832 ep = xfs_iext_get_ext(ifp, i);
833 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
834 arp->l0 = cpu_to_be64(ep->l0);
835 arp->l1 = cpu_to_be64(ep->l1);
839 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
840 xfs_btree_set_numrecs(ablock, cnt);
843 * Fill in the root key and pointer.
845 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
846 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
847 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
848 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
849 be16_to_cpu(block->bb_level)));
850 *pp = cpu_to_be64(args.fsbno);
853 * Do all this logging at the end so that
854 * the root is at the right level.
856 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
857 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
858 ASSERT(*curp == NULL);
860 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
865 * Convert a local file to an extents file.
866 * This code is out of bounds for data forks of regular files,
867 * since the file data needs to get logged so things will stay consistent.
868 * (The bmap-level manipulations are ok, though).
871 xfs_bmap_local_to_extents_empty(
872 struct xfs_inode *ip,
875 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
877 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
878 ASSERT(ifp->if_bytes == 0);
879 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
881 xfs_bmap_forkoff_reset(ip, whichfork);
882 ifp->if_flags &= ~XFS_IFINLINE;
883 ifp->if_flags |= XFS_IFEXTENTS;
884 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
888 STATIC int /* error */
889 xfs_bmap_local_to_extents(
890 xfs_trans_t *tp, /* transaction pointer */
891 xfs_inode_t *ip, /* incore inode pointer */
892 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
893 xfs_extlen_t total, /* total blocks needed by transaction */
894 int *logflagsp, /* inode logging flags */
896 void (*init_fn)(struct xfs_trans *tp,
898 struct xfs_inode *ip,
899 struct xfs_ifork *ifp))
902 int flags; /* logging flags returned */
903 xfs_ifork_t *ifp; /* inode fork pointer */
904 xfs_alloc_arg_t args; /* allocation arguments */
905 xfs_buf_t *bp; /* buffer for extent block */
906 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
909 * We don't want to deal with the case of keeping inode data inline yet.
910 * So converting the data fork of a regular file is invalid here.
912 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
913 ifp = XFS_IFORK_PTR(ip, whichfork);
914 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
916 if (!ifp->if_bytes) {
917 xfs_bmap_local_to_extents_empty(ip, whichfork);
918 flags = XFS_ILOG_CORE;
924 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
926 memset(&args, 0, sizeof(args));
928 args.mp = ip->i_mount;
929 args.firstblock = *firstblock;
931 * Allocate a block. We know we need only one, since the
932 * file currently fits in an inode.
934 if (*firstblock == NULLFSBLOCK) {
935 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
936 args.type = XFS_ALLOCTYPE_START_BNO;
938 args.fsbno = *firstblock;
939 args.type = XFS_ALLOCTYPE_NEAR_BNO;
942 args.minlen = args.maxlen = args.prod = 1;
943 error = xfs_alloc_vextent(&args);
947 /* Can't fail, the space was reserved. */
948 ASSERT(args.fsbno != NULLFSBLOCK);
949 ASSERT(args.len == 1);
950 *firstblock = args.fsbno;
951 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
954 * Initialize the block, copy the data and log the remote buffer.
956 * The callout is responsible for logging because the remote format
957 * might differ from the local format and thus we don't know how much to
958 * log here. Note that init_fn must also set the buffer log item type
961 init_fn(tp, bp, ip, ifp);
963 /* account for the change in fork size */
964 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
965 xfs_bmap_local_to_extents_empty(ip, whichfork);
966 flags |= XFS_ILOG_CORE;
968 xfs_iext_add(ifp, 0, 1);
969 ep = xfs_iext_get_ext(ifp, 0);
970 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
971 trace_xfs_bmap_post_update(ip, 0,
972 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
974 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
975 ip->i_d.di_nblocks = 1;
976 xfs_trans_mod_dquot_byino(tp, ip,
977 XFS_TRANS_DQ_BCOUNT, 1L);
978 flags |= xfs_ilog_fext(whichfork);
986 * Called from xfs_bmap_add_attrfork to handle btree format files.
988 STATIC int /* error */
989 xfs_bmap_add_attrfork_btree(
990 xfs_trans_t *tp, /* transaction pointer */
991 xfs_inode_t *ip, /* incore inode pointer */
992 xfs_fsblock_t *firstblock, /* first block allocated */
993 xfs_bmap_free_t *flist, /* blocks to free at commit */
994 int *flags) /* inode logging flags */
996 xfs_btree_cur_t *cur; /* btree cursor */
997 int error; /* error return value */
998 xfs_mount_t *mp; /* file system mount struct */
999 int stat; /* newroot status */
1002 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
1003 *flags |= XFS_ILOG_DBROOT;
1005 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
1006 cur->bc_private.b.flist = flist;
1007 cur->bc_private.b.firstblock = *firstblock;
1008 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
1010 /* must be at least one entry */
1011 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
1012 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
1015 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1018 *firstblock = cur->bc_private.b.firstblock;
1019 cur->bc_private.b.allocated = 0;
1020 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
1024 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
1029 * Called from xfs_bmap_add_attrfork to handle extents format files.
1031 STATIC int /* error */
1032 xfs_bmap_add_attrfork_extents(
1033 xfs_trans_t *tp, /* transaction pointer */
1034 xfs_inode_t *ip, /* incore inode pointer */
1035 xfs_fsblock_t *firstblock, /* first block allocated */
1036 xfs_bmap_free_t *flist, /* blocks to free at commit */
1037 int *flags) /* inode logging flags */
1039 xfs_btree_cur_t *cur; /* bmap btree cursor */
1040 int error; /* error return value */
1042 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
1045 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
1046 flags, XFS_DATA_FORK);
1048 cur->bc_private.b.allocated = 0;
1049 xfs_btree_del_cursor(cur,
1050 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
1056 * Called from xfs_bmap_add_attrfork to handle local format files. Each
1057 * different data fork content type needs a different callout to do the
1058 * conversion. Some are basic and only require special block initialisation
1059 * callouts for the data formatting; others (directories) are so specialised they
1060 * handle everything themselves.
1062 * XXX (dgc): investigate whether directory conversion can use the generic
1063 * formatting callout. It should be possible - it's just a very complex
1066 STATIC int /* error */
1067 xfs_bmap_add_attrfork_local(
1068 xfs_trans_t *tp, /* transaction pointer */
1069 xfs_inode_t *ip, /* incore inode pointer */
1070 xfs_fsblock_t *firstblock, /* first block allocated */
1071 xfs_bmap_free_t *flist, /* blocks to free at commit */
1072 int *flags) /* inode logging flags */
1074 xfs_da_args_t dargs; /* args for dir/attr code */
1076 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1079 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1080 memset(&dargs, 0, sizeof(dargs));
1081 dargs.geo = ip->i_mount->m_dir_geo;
1083 dargs.firstblock = firstblock;
1084 dargs.flist = flist;
1085 dargs.total = dargs.geo->fsbcount;
1086 dargs.whichfork = XFS_DATA_FORK;
1088 return xfs_dir2_sf_to_block(&dargs);
1091 if (S_ISLNK(VFS_I(ip)->i_mode))
1092 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1093 flags, XFS_DATA_FORK,
1094 xfs_symlink_local_to_remote);
1096 /* should only be called for types that support local format data */
1098 return -EFSCORRUPTED;
1102 * Convert inode from non-attributed to attributed.
1103 * Must not be in a transaction, ip must not be locked.
1105 int /* error code */
1106 xfs_bmap_add_attrfork(
1107 xfs_inode_t *ip, /* incore inode pointer */
1108 int size, /* space new attribute needs */
1109 int rsvd) /* xact may use reserved blks */
1111 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1112 xfs_bmap_free_t flist; /* freed extent records */
1113 xfs_mount_t *mp; /* mount structure */
1114 xfs_trans_t *tp; /* transaction pointer */
1115 int blks; /* space reservation */
1116 int version = 1; /* superblock attr version */
1117 int logflags; /* logging flags */
1118 int error; /* error return value */
1120 ASSERT(XFS_IFORK_Q(ip) == 0);
1123 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1124 tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
1125 blks = XFS_ADDAFORK_SPACE_RES(mp);
1127 tp->t_flags |= XFS_TRANS_RESERVE;
1128 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
1130 xfs_trans_cancel(tp);
1133 xfs_ilock(ip, XFS_ILOCK_EXCL);
1134 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1135 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1136 XFS_QMOPT_RES_REGBLKS);
1139 if (XFS_IFORK_Q(ip))
1141 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1143 * For inodes coming from pre-6.2 filesystems.
1145 ASSERT(ip->i_d.di_aformat == 0);
1146 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1148 ASSERT(ip->i_d.di_anextents == 0);
1150 xfs_trans_ijoin(tp, ip, 0);
1151 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1153 switch (ip->i_d.di_format) {
1154 case XFS_DINODE_FMT_DEV:
1155 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1157 case XFS_DINODE_FMT_UUID:
1158 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1160 case XFS_DINODE_FMT_LOCAL:
1161 case XFS_DINODE_FMT_EXTENTS:
1162 case XFS_DINODE_FMT_BTREE:
1163 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1164 if (!ip->i_d.di_forkoff)
1165 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1166 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1175 ASSERT(ip->i_afp == NULL);
1176 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1177 ip->i_afp->if_flags = XFS_IFEXTENTS;
1179 xfs_bmap_init(&flist, &firstblock);
1180 switch (ip->i_d.di_format) {
1181 case XFS_DINODE_FMT_LOCAL:
1182 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
1185 case XFS_DINODE_FMT_EXTENTS:
1186 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1189 case XFS_DINODE_FMT_BTREE:
1190 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
1198 xfs_trans_log_inode(tp, ip, logflags);
1201 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1202 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1203 bool log_sb = false;
1205 spin_lock(&mp->m_sb_lock);
1206 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1207 xfs_sb_version_addattr(&mp->m_sb);
1210 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1211 xfs_sb_version_addattr2(&mp->m_sb);
1214 spin_unlock(&mp->m_sb_lock);
1219 error = xfs_bmap_finish(&tp, &flist, NULL);
1222 error = xfs_trans_commit(tp);
1223 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1227 xfs_bmap_cancel(&flist);
1229 xfs_trans_cancel(tp);
1230 xfs_iunlock(ip, XFS_ILOCK_EXCL);
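/*
 * A typical caller of xfs_bmap_add_attrfork() (a sketch, not a verbatim
 * call site) only supplies the inode, the space the new attribute will
 * need, and whether reserved blocks may be used:
 *
 *	error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *	if (error)
 *		return error;
 */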
1235 * Internal and external extent tree search functions.
1239 * Read in the extents to if_extents.
1240 * All inode fields are set up by caller, we just traverse the btree
1241 * and copy the records in. If the file system cannot contain unwritten
1242 * extents, the records are checked to ensure no "state" flags are set.
1245 xfs_bmap_read_extents(
1246 xfs_trans_t *tp, /* transaction pointer */
1247 xfs_inode_t *ip, /* incore inode */
1248 int whichfork) /* data or attr fork */
1250 struct xfs_btree_block *block; /* current btree block */
1251 xfs_fsblock_t bno; /* block # of "block" */
1252 xfs_buf_t *bp; /* buffer for "block" */
1253 int error; /* error return value */
1254 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
1255 xfs_extnum_t i, j; /* index into the extents list */
1256 xfs_ifork_t *ifp; /* fork structure */
1257 int level; /* btree level, for checking */
1258 xfs_mount_t *mp; /* file system mount structure */
1259 __be64 *pp; /* pointer to block address */
1261 xfs_extnum_t room; /* number of entries there's room for */
1265 ifp = XFS_IFORK_PTR(ip, whichfork);
1266 exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
1267 XFS_EXTFMT_INODE(ip);
1268 block = ifp->if_broot;
1270 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1272 level = be16_to_cpu(block->bb_level);
1274 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1275 bno = be64_to_cpu(*pp);
1276 ASSERT(bno != NULLFSBLOCK);
1277 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
1278 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
1280 * Go down the tree until leaf level is reached, following the first
1281 * pointer (leftmost) at each level.
1283 while (level-- > 0) {
1284 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1285 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1288 block = XFS_BUF_TO_BLOCK(bp);
1291 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1292 bno = be64_to_cpu(*pp);
1293 XFS_WANT_CORRUPTED_GOTO(mp,
1294 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1295 xfs_trans_brelse(tp, bp);
1298 * Here with bp and block set to the leftmost leaf node in the tree.
1300 room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1303 * Loop over all leaf nodes. Copy information to the extent records.
1306 xfs_bmbt_rec_t *frp;
1307 xfs_fsblock_t nextbno;
1308 xfs_extnum_t num_recs;
1311 num_recs = xfs_btree_get_numrecs(block);
1312 if (unlikely(i + num_recs > room)) {
1313 ASSERT(i + num_recs <= room);
1314 xfs_warn(ip->i_mount,
1315 "corrupt dinode %Lu, (btree extents).",
1316 (unsigned long long) ip->i_ino);
1317 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1318 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1322 * Read-ahead the next leaf block, if any.
1324 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1325 if (nextbno != NULLFSBLOCK)
1326 xfs_btree_reada_bufl(mp, nextbno, 1,
1329 * Copy records into the extent records.
1331 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1333 for (j = 0; j < num_recs; j++, i++, frp++) {
1334 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1335 trp->l0 = be64_to_cpu(frp->l0);
1336 trp->l1 = be64_to_cpu(frp->l1);
1338 if (exntf == XFS_EXTFMT_NOSTATE) {
1340 * Check all attribute bmap btree records and
1341 * any "older" data bmap btree records for a
1342 * set bit in the "extent flag" position.
1344 if (unlikely(xfs_check_nostate_extents(ifp,
1345 start, num_recs))) {
1346 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1352 xfs_trans_brelse(tp, bp);
1355 * If we've reached the end, stop.
1357 if (bno == NULLFSBLOCK)
1359 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1360 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1363 block = XFS_BUF_TO_BLOCK(bp);
1365 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
1366 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
1367 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1370 xfs_trans_brelse(tp, bp);
1371 return -EFSCORRUPTED;
1376 * Search the extent records for the entry containing block bno.
1377 * If bno lies in a hole, point to the next entry. If bno lies
1378 * past eof, *eofp will be set, and *prevp will contain the last
1379 * entry (null if none). Else, *lastxp will be set to the index
1380 * of the found entry; *gotp will contain the entry.
1382 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1383 xfs_bmap_search_multi_extents(
1384 xfs_ifork_t *ifp, /* inode fork pointer */
1385 xfs_fileoff_t bno, /* block number searched for */
1386 int *eofp, /* out: end of file found */
1387 xfs_extnum_t *lastxp, /* out: last extent index */
1388 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1389 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1391 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1392 xfs_extnum_t lastx; /* last extent index */
1395 * Initialize the extent entry structure to catch access to
1396 * uninitialized br_startblock field.
1398 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
1399 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
1400 gotp->br_state = XFS_EXT_INVALID;
1401 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
1402 prevp->br_startoff = NULLFILEOFF;
1404 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
1406 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
1408 if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1409 xfs_bmbt_get_all(ep, gotp);
1423 * Search the extents list for the inode, for the extent containing bno.
1424 * If bno lies in a hole, point to the next entry. If bno lies past eof,
1425 * *eofp will be set, and *prevp will contain the last entry (null if none).
1426 * Else, *lastxp will be set to the index of the found
1427 * entry; *gotp will contain the entry.
1429 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1430 xfs_bmap_search_extents(
1431 xfs_inode_t *ip, /* incore inode pointer */
1432 xfs_fileoff_t bno, /* block number searched for */
1433 int fork, /* data or attr fork */
1434 int *eofp, /* out: end of file found */
1435 xfs_extnum_t *lastxp, /* out: last extent index */
1436 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1437 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1439 xfs_ifork_t *ifp; /* inode fork pointer */
1440 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1442 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
1443 ifp = XFS_IFORK_PTR(ip, fork);
1445 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
1447 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
1448 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
1449 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
1450 "Access to block zero in inode %llu "
1451 "start_block: %llx start_off: %llx "
1452 "blkcnt: %llx extent-state: %x lastx: %x",
1453 (unsigned long long)ip->i_ino,
1454 (unsigned long long)gotp->br_startblock,
1455 (unsigned long long)gotp->br_startoff,
1456 (unsigned long long)gotp->br_blockcount,
1457 gotp->br_state, *lastxp);
1458 *lastxp = NULLEXTNUM;
1466 * Returns the file-relative block number of the first unused block(s)
1467 * in the file with at least "len" logically contiguous blocks free.
1468 * This is the lowest-address hole if the file has holes, else the first block
1469 * past the end of file.
1470 * Return 0 if the file is currently local (in-inode).
1473 xfs_bmap_first_unused(
1474 xfs_trans_t *tp, /* transaction pointer */
1475 xfs_inode_t *ip, /* incore inode */
1476 xfs_extlen_t len, /* size of hole to find */
1477 xfs_fileoff_t *first_unused, /* unused block */
1478 int whichfork) /* data or attr fork */
1480 int error; /* error return value */
1481 int idx; /* extent record index */
1482 xfs_ifork_t *ifp; /* inode fork pointer */
1483 xfs_fileoff_t lastaddr; /* last block number seen */
1484 xfs_fileoff_t lowest; /* lowest useful block */
1485 xfs_fileoff_t max; /* starting useful block */
1486 xfs_fileoff_t off; /* offset for this block */
1487 xfs_extnum_t nextents; /* number of extent entries */
1489 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1490 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1491 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1492 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1496 ifp = XFS_IFORK_PTR(ip, whichfork);
1497 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1498 (error = xfs_iread_extents(tp, ip, whichfork)))
1500 lowest = *first_unused;
1501 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1502 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1503 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
1504 off = xfs_bmbt_get_startoff(ep);
1506 * See if the hole before this extent will work.
1508 if (off >= lowest + len && off - max >= len) {
1509 *first_unused = max;
1512 lastaddr = off + xfs_bmbt_get_blockcount(ep);
1513 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1515 *first_unused = max;
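/*
 * Worked example for the hole search above (offsets hypothetical, assuming
 * the caller passed *first_unused = 0): with extents covering file blocks
 * [0, 10) and [15, 20) and len = 4, the gap at blocks 10-14 is the first
 * hole of at least 4 blocks, so *first_unused is set to 10; with len = 8 no
 * in-file hole is big enough and the offset just past the last extent (20)
 * is used instead.
 */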
1520 * Returns the file-relative block number of the last block - 1 before
1521 * last_block (input value) in the file.
1522 * This is not based on i_size, it is based on the extent records.
1523 * Returns 0 for local files, as they do not have extent records.
1526 xfs_bmap_last_before(
1527 xfs_trans_t *tp, /* transaction pointer */
1528 xfs_inode_t *ip, /* incore inode */
1529 xfs_fileoff_t *last_block, /* last block */
1530 int whichfork) /* data or attr fork */
1532 xfs_fileoff_t bno; /* input file offset */
1533 int eof; /* hit end of file */
1534 xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
1535 int error; /* error return value */
1536 xfs_bmbt_irec_t got; /* current extent value */
1537 xfs_ifork_t *ifp; /* inode fork pointer */
1538 xfs_extnum_t lastx; /* last extent used */
1539 xfs_bmbt_irec_t prev; /* previous extent value */
1541 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1542 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
1543 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
1545 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1549 ifp = XFS_IFORK_PTR(ip, whichfork);
1550 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1551 (error = xfs_iread_extents(tp, ip, whichfork)))
1553 bno = *last_block - 1;
1554 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
1556 if (eof || xfs_bmbt_get_startoff(ep) > bno) {
1557 if (prev.br_startoff == NULLFILEOFF)
1560 *last_block = prev.br_startoff + prev.br_blockcount;
1563 * Otherwise *last_block is already the right answer.
1569 xfs_bmap_last_extent(
1570 struct xfs_trans *tp,
1571 struct xfs_inode *ip,
1573 struct xfs_bmbt_irec *rec,
1576 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1580 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1581 error = xfs_iread_extents(tp, ip, whichfork);
1586 nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
1587 if (nextents == 0) {
1592 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
1598 * Check the last inode extent to determine whether this allocation will result
1599 * in blocks being allocated at the end of the file. When we allocate new data
1600 * blocks at the end of the file which do not start at the previous data block,
1601 * we will try to align the new blocks at stripe unit boundaries.
1603 * Returns 1 in bma->aeof if the file (fork) is empty, as any new write will
1604 * be at or past the EOF.
1608 struct xfs_bmalloca *bma,
1611 struct xfs_bmbt_irec rec;
1616 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1627 * Check if we are allocating at or past the last extent, or at least into
1628 * the last delayed allocated extent.
1630 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1631 (bma->offset >= rec.br_startoff &&
1632 isnullstartblock(rec.br_startblock));
1637 * Returns the file-relative block number of the first block past eof in
1638 * the file. This is not based on i_size, it is based on the extent records.
1639 * Returns 0 for local files, as they do not have extent records.
1642 xfs_bmap_last_offset(
1643 struct xfs_inode *ip,
1644 xfs_fileoff_t *last_block,
1647 struct xfs_bmbt_irec rec;
1653 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1656 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1657 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1660 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1661 if (error || is_empty)
1664 *last_block = rec.br_startoff + rec.br_blockcount;
1669 * Returns whether the selected fork of the inode has exactly one
1670 * block or not. For the data fork we check this matches di_size,
1671 * implying the file's range is 0..bsize-1.
1673 int /* 1=>1 block, 0=>otherwise */
1675 xfs_inode_t *ip, /* incore inode */
1676 int whichfork) /* data or attr fork */
1678 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1679 xfs_ifork_t *ifp; /* inode fork pointer */
1680 int rval; /* return value */
1681 xfs_bmbt_irec_t s; /* internal version of extent */
1684 if (whichfork == XFS_DATA_FORK)
1685 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1687 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1689 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1691 ifp = XFS_IFORK_PTR(ip, whichfork);
1692 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1693 ep = xfs_iext_get_ext(ifp, 0);
1694 xfs_bmbt_get_all(ep, &s);
1695 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1696 if (rval && whichfork == XFS_DATA_FORK)
1697 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1702 * Extent tree manipulation functions used during allocation.
1706 * Convert a delayed allocation to a real allocation.
1708 STATIC int /* error */
1709 xfs_bmap_add_extent_delay_real(
1710 struct xfs_bmalloca *bma)
1712 struct xfs_bmbt_irec *new = &bma->got;
1713 int diff; /* temp value */
1714 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1715 int error; /* error return value */
1716 int i; /* temp state */
1717 xfs_ifork_t *ifp; /* inode fork pointer */
1718 xfs_fileoff_t new_endoff; /* end offset of new entry */
1719 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1720 /* left is 0, right is 1, prev is 2 */
1721 int rval=0; /* return value (logging flags) */
1722 int state = 0; /* state bits, accessed thru macros */
1723 xfs_filblks_t da_new; /* new count del alloc blocks used */
1724 xfs_filblks_t da_old; /* old count del alloc blocks used */
1725 xfs_filblks_t temp = 0; /* value for da_new calculations */
1726 xfs_filblks_t temp2 = 0; /* value for da_new calculations */
1727 int tmp_rval; /* partial logging flags */
1728 int whichfork = XFS_DATA_FORK;
1729 struct xfs_mount *mp;
1731 mp = bma->ip->i_mount;
1732 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1734 ASSERT(bma->idx >= 0);
1735 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
1736 ASSERT(!isnullstartblock(new->br_startblock));
1738 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1740 XFS_STATS_INC(mp, xs_add_exlist);
1747 * Set up a bunch of variables to make the tests simpler.
1749 ep = xfs_iext_get_ext(ifp, bma->idx);
1750 xfs_bmbt_get_all(ep, &PREV);
1751 new_endoff = new->br_startoff + new->br_blockcount;
1752 ASSERT(PREV.br_startoff <= new->br_startoff);
1753 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1755 da_old = startblockval(PREV.br_startblock);
1759 * Set flags determining what part of the previous delayed allocation
1760 * extent is being replaced by a real allocation.
1762 if (PREV.br_startoff == new->br_startoff)
1763 state |= BMAP_LEFT_FILLING;
1764 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1765 state |= BMAP_RIGHT_FILLING;
1768 * Check and set flags if this segment has a left neighbor.
1769 * Don't set contiguous if the combined extent would be too large.
1772 state |= BMAP_LEFT_VALID;
1773 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1775 if (isnullstartblock(LEFT.br_startblock))
1776 state |= BMAP_LEFT_DELAY;
1779 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1780 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1781 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1782 LEFT.br_state == new->br_state &&
1783 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1784 state |= BMAP_LEFT_CONTIG;
1787 * Check and set flags if this segment has a right neighbor.
1788 * Don't set contiguous if the combined extent would be too large.
1789 * Also check for all-three-contiguous being too large.
1791 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1792 state |= BMAP_RIGHT_VALID;
1793 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1795 if (isnullstartblock(RIGHT.br_startblock))
1796 state |= BMAP_RIGHT_DELAY;
1799 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1800 new_endoff == RIGHT.br_startoff &&
1801 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1802 new->br_state == RIGHT.br_state &&
1803 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1804 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1805 BMAP_RIGHT_FILLING)) !=
1806 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1807 BMAP_RIGHT_FILLING) ||
1808 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1810 state |= BMAP_RIGHT_CONTIG;
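	/*
	 * In the switch below, LEFT_FILLING/RIGHT_FILLING mean the new real
	 * allocation starts/ends exactly where the delayed extent (PREV)
	 * does, and LEFT_CONTIG/RIGHT_CONTIG mean it can be merged with the
	 * neighbouring extent on that side; each case label handles one
	 * combination of those bits.
	 */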
1814 * Switch out based on the FILLING and CONTIG state bits.
1816 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1817 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1818 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1819 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1821 * Filling in all of a previously delayed allocation extent.
1822 * The left and right neighbors are both contiguous with new.
1825 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1826 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1827 LEFT.br_blockcount + PREV.br_blockcount +
1828 RIGHT.br_blockcount);
1829 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1831 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1832 bma->ip->i_d.di_nextents--;
1833 if (bma->cur == NULL)
1834 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1836 rval = XFS_ILOG_CORE;
1837 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1838 RIGHT.br_startblock,
1839 RIGHT.br_blockcount, &i);
1842 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1843 error = xfs_btree_delete(bma->cur, &i);
1846 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1847 error = xfs_btree_decrement(bma->cur, 0, &i);
1850 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1851 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1853 LEFT.br_blockcount +
1854 PREV.br_blockcount +
1855 RIGHT.br_blockcount, LEFT.br_state);
1861 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1863 * Filling in all of a previously delayed allocation extent.
1864 * The left neighbor is contiguous, the right is not.
1868 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1869 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1870 LEFT.br_blockcount + PREV.br_blockcount);
1871 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1873 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1874 if (bma->cur == NULL)
1875 rval = XFS_ILOG_DEXT;
1878 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1879 LEFT.br_startblock, LEFT.br_blockcount,
1883 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1884 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1886 LEFT.br_blockcount +
1887 PREV.br_blockcount, LEFT.br_state);
1893 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1895 * Filling in all of a previously delayed allocation extent.
1896 * The right neighbor is contiguous, the left is not.
1898 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1899 xfs_bmbt_set_startblock(ep, new->br_startblock);
1900 xfs_bmbt_set_blockcount(ep,
1901 PREV.br_blockcount + RIGHT.br_blockcount);
1902 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1904 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1905 if (bma->cur == NULL)
1906 rval = XFS_ILOG_DEXT;
1909 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1910 RIGHT.br_startblock,
1911 RIGHT.br_blockcount, &i);
1914 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1915 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1917 PREV.br_blockcount +
1918 RIGHT.br_blockcount, PREV.br_state);
1924 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1926 * Filling in all of a previously delayed allocation extent.
1927 * Neither the left nor right neighbors are contiguous with
1930 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1931 xfs_bmbt_set_startblock(ep, new->br_startblock);
1932 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1934 bma->ip->i_d.di_nextents++;
1935 if (bma->cur == NULL)
1936 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1938 rval = XFS_ILOG_CORE;
1939 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1940 new->br_startblock, new->br_blockcount,
1944 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1945 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1946 error = xfs_btree_insert(bma->cur, &i);
1949 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1953 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1955 * Filling in the first part of a previous delayed allocation.
1956 * The left neighbor is contiguous.
1958 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1959 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1960 LEFT.br_blockcount + new->br_blockcount);
1961 xfs_bmbt_set_startoff(ep,
1962 PREV.br_startoff + new->br_blockcount);
1963 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1965 temp = PREV.br_blockcount - new->br_blockcount;
1966 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1967 xfs_bmbt_set_blockcount(ep, temp);
1968 if (bma->cur == NULL)
1969 rval = XFS_ILOG_DEXT;
1972 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1973 LEFT.br_startblock, LEFT.br_blockcount,
1977 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1978 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1980 LEFT.br_blockcount +
1986 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1987 startblockval(PREV.br_startblock));
1988 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1989 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1994 case BMAP_LEFT_FILLING:
1996 * Filling in the first part of a previous delayed allocation.
1997 * The left neighbor is not contiguous.
1999 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2000 xfs_bmbt_set_startoff(ep, new_endoff);
2001 temp = PREV.br_blockcount - new->br_blockcount;
2002 xfs_bmbt_set_blockcount(ep, temp);
2003 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
2004 bma->ip->i_d.di_nextents++;
2005 if (bma->cur == NULL)
2006 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2008 rval = XFS_ILOG_CORE;
2009 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2010 new->br_startblock, new->br_blockcount,
2014 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2015 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2016 error = xfs_btree_insert(bma->cur, &i);
2019 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2022 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2023 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2024 bma->firstblock, bma->flist,
2025 &bma->cur, 1, &tmp_rval, whichfork);
2030 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2031 startblockval(PREV.br_startblock) -
2032 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2033 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
2034 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2035 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2038 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2040 * Filling in the last part of a previous delayed allocation.
2041 * The right neighbor is contiguous with the new allocation.
2043 temp = PREV.br_blockcount - new->br_blockcount;
2044 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2045 xfs_bmbt_set_blockcount(ep, temp);
2046 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
2047 new->br_startoff, new->br_startblock,
2048 new->br_blockcount + RIGHT.br_blockcount,
2050 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2051 if (bma->cur == NULL)
2052 rval = XFS_ILOG_DEXT;
2055 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
2056 RIGHT.br_startblock,
2057 RIGHT.br_blockcount, &i);
2060 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2061 error = xfs_bmbt_update(bma->cur, new->br_startoff,
2063 new->br_blockcount +
2064 RIGHT.br_blockcount,
2070 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2071 startblockval(PREV.br_startblock));
2072 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2073 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2074 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2079 case BMAP_RIGHT_FILLING:
2081 * Filling in the last part of a previous delayed allocation.
2082 * The right neighbor is not contiguous.
2084 temp = PREV.br_blockcount - new->br_blockcount;
2085 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2086 xfs_bmbt_set_blockcount(ep, temp);
2087 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2088 bma->ip->i_d.di_nextents++;
2089 if (bma->cur == NULL)
2090 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2092 rval = XFS_ILOG_CORE;
2093 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2094 new->br_startblock, new->br_blockcount,
2098 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2099 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2100 error = xfs_btree_insert(bma->cur, &i);
2103 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2106 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2107 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2108 bma->firstblock, bma->flist, &bma->cur, 1,
2109 &tmp_rval, whichfork);
2114 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2115 startblockval(PREV.br_startblock) -
2116 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2117 ep = xfs_iext_get_ext(ifp, bma->idx);
2118 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2119 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2126 * Filling in the middle part of a previous delayed allocation.
2127 * Contiguity is impossible here.
2128 * This case is avoided almost all the time.
2130 * We start with a delayed allocation:
2132 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2135 * and we are allocating:
2136 * +rrrrrrrrrrrrrrrrr+
2139 * and we set it up for insertion as:
2140 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2142 * PREV @ idx (left delayed piece)   LEFT (= new, real)   RIGHT (right delayed piece)
2143 * LEFT and RIGHT are inserted together at idx + 1
2145 temp = new->br_startoff - PREV.br_startoff;
2146 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2147 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2148 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2150 RIGHT.br_state = PREV.br_state;
2151 RIGHT.br_startblock = nullstartblock(
2152 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2153 RIGHT.br_startoff = new_endoff;
2154 RIGHT.br_blockcount = temp2;
2155 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2156 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2157 bma->ip->i_d.di_nextents++;
2158 if (bma->cur == NULL)
2159 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2161 rval = XFS_ILOG_CORE;
2162 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2163 new->br_startblock, new->br_blockcount,
2167 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2168 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2169 error = xfs_btree_insert(bma->cur, &i);
2172 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2175 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2176 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2177 bma->firstblock, bma->flist, &bma->cur,
2178 1, &tmp_rval, whichfork);
2183 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2184 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2185 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
2186 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2188 error = xfs_mod_fdblocks(bma->ip->i_mount,
2189 -((int64_t)diff), false);
2195 ep = xfs_iext_get_ext(ifp, bma->idx);
2196 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2197 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2198 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2199 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2200 nullstartblock((int)temp2));
2201 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2204 da_new = temp + temp2;
2207 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2208 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2209 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2210 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2211 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2212 case BMAP_LEFT_CONTIG:
2213 case BMAP_RIGHT_CONTIG:
2215 * These cases are all impossible.
2220 /* convert to a btree if necessary */
2221 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2222 int tmp_logflags; /* partial log flag return val */
2224 ASSERT(bma->cur == NULL);
2225 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2226 bma->firstblock, bma->flist, &bma->cur,
2227 da_old > 0, &tmp_logflags, whichfork);
2228 bma->logflags |= tmp_logflags;
2233 /* adjust for changes in reserved delayed indirect blocks */
2234 if (da_old || da_new) {
2237 temp += bma->cur->bc_private.b.allocated;
2238 ASSERT(temp <= da_old);
2240 xfs_mod_fdblocks(bma->ip->i_mount,
2241 (int64_t)(da_old - temp), false);
2244 /* clear out the allocated field, done with it now in any case. */
2246 bma->cur->bc_private.b.allocated = 0;
2248 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2250 bma->logflags |= rval;
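/*
 * Illustrative sketch, not part of the original xfs_bmap.c: the cases
 * above track the indirect blocks reserved for the old delayed extent
 * (da_old) and the worst-case need of whatever delayed extent remains
 * (da_new).  The hypothetical helper below models the final give-back
 * of the unused reservation, mirroring the xfs_mod_fdblocks() call in
 * the cleanup path.
 */
static long long example_indirect_giveback(long long da_old, long long da_new,
					   long long cur_allocated)
{
	/* blocks still tied up: remaining reservation plus btree blocks */
	long long still_needed = da_new + cur_allocated;

	/* the code above asserts still_needed <= da_old before this */
	return da_old > still_needed ? da_old - still_needed : 0;
}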
2258 * Convert an unwritten allocation to a real allocation or vice versa.
2260 STATIC int /* error */
2261 xfs_bmap_add_extent_unwritten_real(
2262 struct xfs_trans *tp,
2263 xfs_inode_t *ip, /* incore inode pointer */
2264 xfs_extnum_t *idx, /* extent number to update/insert */
2265 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2266 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2267 xfs_fsblock_t *first, /* pointer to firstblock variable */
2268 xfs_bmap_free_t *flist, /* list of extents to be freed */
2269 int *logflagsp) /* inode logging flags */
2271 xfs_btree_cur_t *cur; /* btree cursor */
2272 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2273 int error; /* error return value */
2274 int i; /* temp state */
2275 xfs_ifork_t *ifp; /* inode fork pointer */
2276 xfs_fileoff_t new_endoff; /* end offset of new entry */
2277 xfs_exntst_t newext; /* new extent state */
2278 xfs_exntst_t oldext; /* old extent state */
2279 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2280 /* left is 0, right is 1, prev is 2 */
2281 int rval=0; /* return value (logging flags) */
2282 int state = 0;/* state bits, accessed thru macros */
2283 struct xfs_mount *mp = tp->t_mountp;
2288 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2291 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2292 ASSERT(!isnullstartblock(new->br_startblock));
2294 XFS_STATS_INC(mp, xs_add_exlist);
2301 * Set up a bunch of variables to make the tests simpler.
2304 ep = xfs_iext_get_ext(ifp, *idx);
2305 xfs_bmbt_get_all(ep, &PREV);
2306 newext = new->br_state;
2307 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2308 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2309 ASSERT(PREV.br_state == oldext);
2310 new_endoff = new->br_startoff + new->br_blockcount;
2311 ASSERT(PREV.br_startoff <= new->br_startoff);
2312 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2315 * Set flags determining what part of the previous oldext allocation
2316 * extent is being replaced by a newext allocation.
2318 if (PREV.br_startoff == new->br_startoff)
2319 state |= BMAP_LEFT_FILLING;
2320 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2321 state |= BMAP_RIGHT_FILLING;
2324 * Check and set flags if this segment has a left neighbor.
2325 * Don't set contiguous if the combined extent would be too large.
2328 state |= BMAP_LEFT_VALID;
2329 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2331 if (isnullstartblock(LEFT.br_startblock))
2332 state |= BMAP_LEFT_DELAY;
2335 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2336 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2337 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2338 LEFT.br_state == newext &&
2339 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2340 state |= BMAP_LEFT_CONTIG;
2343 * Check and set flags if this segment has a right neighbor.
2344 * Don't set contiguous if the combined extent would be too large.
2345 * Also check for all-three-contiguous being too large.
2347 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
2348 state |= BMAP_RIGHT_VALID;
2349 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2350 if (isnullstartblock(RIGHT.br_startblock))
2351 state |= BMAP_RIGHT_DELAY;
2354 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2355 new_endoff == RIGHT.br_startoff &&
2356 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2357 newext == RIGHT.br_state &&
2358 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2359 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2360 BMAP_RIGHT_FILLING)) !=
2361 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2362 BMAP_RIGHT_FILLING) ||
2363 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2365 state |= BMAP_RIGHT_CONTIG;
2368 * Switch out based on the FILLING and CONTIG state bits.
2370 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2371 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2372 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2373 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2375 * Setting all of a previous oldext extent to newext.
2376 * The left and right neighbors are both contiguous with new.
2380 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2381 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2382 LEFT.br_blockcount + PREV.br_blockcount +
2383 RIGHT.br_blockcount);
2384 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2386 xfs_iext_remove(ip, *idx + 1, 2, state);
2387 ip->i_d.di_nextents -= 2;
2389 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2391 rval = XFS_ILOG_CORE;
2392 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2393 RIGHT.br_startblock,
2394 RIGHT.br_blockcount, &i)))
2396 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2397 if ((error = xfs_btree_delete(cur, &i)))
2399 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2400 if ((error = xfs_btree_decrement(cur, 0, &i)))
2402 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2403 if ((error = xfs_btree_delete(cur, &i)))
2405 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2406 if ((error = xfs_btree_decrement(cur, 0, &i)))
2408 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2409 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2411 LEFT.br_blockcount + PREV.br_blockcount +
2412 RIGHT.br_blockcount, LEFT.br_state)))
2417 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2419 * Setting all of a previous oldext extent to newext.
2420 * The left neighbor is contiguous, the right is not.
2424 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2425 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2426 LEFT.br_blockcount + PREV.br_blockcount);
2427 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2429 xfs_iext_remove(ip, *idx + 1, 1, state);
2430 ip->i_d.di_nextents--;
2432 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2434 rval = XFS_ILOG_CORE;
2435 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2436 PREV.br_startblock, PREV.br_blockcount,
2439 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2440 if ((error = xfs_btree_delete(cur, &i)))
2442 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2443 if ((error = xfs_btree_decrement(cur, 0, &i)))
2445 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2446 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2448 LEFT.br_blockcount + PREV.br_blockcount,
2454 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2456 * Setting all of a previous oldext extent to newext.
2457 * The right neighbor is contiguous, the left is not.
2459 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2460 xfs_bmbt_set_blockcount(ep,
2461 PREV.br_blockcount + RIGHT.br_blockcount);
2462 xfs_bmbt_set_state(ep, newext);
2463 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2464 xfs_iext_remove(ip, *idx + 1, 1, state);
2465 ip->i_d.di_nextents--;
2467 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2469 rval = XFS_ILOG_CORE;
2470 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2471 RIGHT.br_startblock,
2472 RIGHT.br_blockcount, &i)))
2474 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2475 if ((error = xfs_btree_delete(cur, &i)))
2477 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2478 if ((error = xfs_btree_decrement(cur, 0, &i)))
2480 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2481 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2483 new->br_blockcount + RIGHT.br_blockcount,
2489 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2491 * Setting all of a previous oldext extent to newext.
2492 * Neither the left nor right neighbors are contiguous with the new extent.
2495 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2496 xfs_bmbt_set_state(ep, newext);
2497 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2500 rval = XFS_ILOG_DEXT;
2503 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2504 new->br_startblock, new->br_blockcount,
2507 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2508 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2509 new->br_startblock, new->br_blockcount,
2515 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2517 * Setting the first part of a previous oldext extent to newext.
2518 * The left neighbor is contiguous.
2520 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2521 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2522 LEFT.br_blockcount + new->br_blockcount);
2523 xfs_bmbt_set_startoff(ep,
2524 PREV.br_startoff + new->br_blockcount);
2525 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2527 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2528 xfs_bmbt_set_startblock(ep,
2529 new->br_startblock + new->br_blockcount);
2530 xfs_bmbt_set_blockcount(ep,
2531 PREV.br_blockcount - new->br_blockcount);
2532 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2537 rval = XFS_ILOG_DEXT;
2540 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2541 PREV.br_startblock, PREV.br_blockcount,
2544 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2545 if ((error = xfs_bmbt_update(cur,
2546 PREV.br_startoff + new->br_blockcount,
2547 PREV.br_startblock + new->br_blockcount,
2548 PREV.br_blockcount - new->br_blockcount,
2551 if ((error = xfs_btree_decrement(cur, 0, &i)))
2553 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2555 LEFT.br_blockcount + new->br_blockcount,
2562 case BMAP_LEFT_FILLING:
2564 * Setting the first part of a previous oldext extent to newext.
2565 * The left neighbor is not contiguous.
2567 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2568 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2569 xfs_bmbt_set_startoff(ep, new_endoff);
2570 xfs_bmbt_set_blockcount(ep,
2571 PREV.br_blockcount - new->br_blockcount);
2572 xfs_bmbt_set_startblock(ep,
2573 new->br_startblock + new->br_blockcount);
2574 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2576 xfs_iext_insert(ip, *idx, 1, new, state);
2577 ip->i_d.di_nextents++;
2579 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2581 rval = XFS_ILOG_CORE;
2582 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2583 PREV.br_startblock, PREV.br_blockcount,
2586 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2587 if ((error = xfs_bmbt_update(cur,
2588 PREV.br_startoff + new->br_blockcount,
2589 PREV.br_startblock + new->br_blockcount,
2590 PREV.br_blockcount - new->br_blockcount,
2593 cur->bc_rec.b = *new;
2594 if ((error = xfs_btree_insert(cur, &i)))
2596 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2600 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2602 * Setting the last part of a previous oldext extent to newext.
2603 * The right neighbor is contiguous with the new allocation.
2605 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2606 xfs_bmbt_set_blockcount(ep,
2607 PREV.br_blockcount - new->br_blockcount);
2608 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2612 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2613 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2614 new->br_startoff, new->br_startblock,
2615 new->br_blockcount + RIGHT.br_blockcount, newext);
2616 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2619 rval = XFS_ILOG_DEXT;
2622 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2624 PREV.br_blockcount, &i)))
2626 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2627 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2629 PREV.br_blockcount - new->br_blockcount,
2632 if ((error = xfs_btree_increment(cur, 0, &i)))
2634 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2636 new->br_blockcount + RIGHT.br_blockcount,
2642 case BMAP_RIGHT_FILLING:
2644 * Setting the last part of a previous oldext extent to newext.
2645 * The right neighbor is not contiguous.
2647 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2648 xfs_bmbt_set_blockcount(ep,
2649 PREV.br_blockcount - new->br_blockcount);
2650 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2653 xfs_iext_insert(ip, *idx, 1, new, state);
2655 ip->i_d.di_nextents++;
2657 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2659 rval = XFS_ILOG_CORE;
2660 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2661 PREV.br_startblock, PREV.br_blockcount,
2664 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2665 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2667 PREV.br_blockcount - new->br_blockcount,
2670 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2671 new->br_startblock, new->br_blockcount,
2674 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2675 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2676 if ((error = xfs_btree_insert(cur, &i)))
2678 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2684 * Setting the middle part of a previous oldext extent to
2685 * newext. Contiguity is impossible here.
2686 * One extent becomes three extents.
2688 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2689 xfs_bmbt_set_blockcount(ep,
2690 new->br_startoff - PREV.br_startoff);
2691 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2694 r[1].br_startoff = new_endoff;
2695 r[1].br_blockcount =
2696 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2697 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2698 r[1].br_state = oldext;
2701 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2703 ip->i_d.di_nextents += 2;
2705 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2707 rval = XFS_ILOG_CORE;
2708 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2709 PREV.br_startblock, PREV.br_blockcount,
2712 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2713 /* new right extent - oldext */
2714 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2715 r[1].br_startblock, r[1].br_blockcount,
2718 /* new left extent - oldext */
2719 cur->bc_rec.b = PREV;
2720 cur->bc_rec.b.br_blockcount =
2721 new->br_startoff - PREV.br_startoff;
2722 if ((error = xfs_btree_insert(cur, &i)))
2724 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2726 * Reset the cursor to the position of the new extent
2727 * we are about to insert as we can't trust it after
2728 * the previous insert.
2730 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2731 new->br_startblock, new->br_blockcount,
2734 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2735 /* new middle extent - newext */
2736 cur->bc_rec.b.br_state = new->br_state;
2737 if ((error = xfs_btree_insert(cur, &i)))
2739 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2743 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2744 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2745 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2746 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2747 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2748 case BMAP_LEFT_CONTIG:
2749 case BMAP_RIGHT_CONTIG:
2751 * These cases are all impossible.
2756 /* convert to a btree if necessary */
2757 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
2758 int tmp_logflags; /* partial log flag return val */
2760 ASSERT(cur == NULL);
2761 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
2762 0, &tmp_logflags, XFS_DATA_FORK);
2763 *logflagsp |= tmp_logflags;
2768 /* clear out the allocated field, done with it now in any case. */
2770 cur->bc_private.b.allocated = 0;
2774 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
2784 * Convert a hole to a delayed allocation.
2787 xfs_bmap_add_extent_hole_delay(
2788 xfs_inode_t *ip, /* incore inode pointer */
2789 xfs_extnum_t *idx, /* extent number to update/insert */
2790 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2792 xfs_ifork_t *ifp; /* inode fork pointer */
2793 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2794 xfs_filblks_t newlen=0; /* new indirect size */
2795 xfs_filblks_t oldlen=0; /* old indirect size */
2796 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2797 int state; /* state bits, accessed thru macros */
2798 xfs_filblks_t temp=0; /* temp for indirect calculations */
2800 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2802 ASSERT(isnullstartblock(new->br_startblock));
2805 * Check and set flags if this segment has a left neighbor
2808 state |= BMAP_LEFT_VALID;
2809 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2811 if (isnullstartblock(left.br_startblock))
2812 state |= BMAP_LEFT_DELAY;
2816 * Check and set flags if the current (right) segment exists.
2817 * If it doesn't exist, we're converting the hole at end-of-file.
2819 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2820 state |= BMAP_RIGHT_VALID;
2821 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2823 if (isnullstartblock(right.br_startblock))
2824 state |= BMAP_RIGHT_DELAY;
2828 * Set contiguity flags on the left and right neighbors.
2829 * Don't let extents get too large, even if the pieces are contiguous.
2831 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2832 left.br_startoff + left.br_blockcount == new->br_startoff &&
2833 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2834 state |= BMAP_LEFT_CONTIG;
2836 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2837 new->br_startoff + new->br_blockcount == right.br_startoff &&
2838 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2839 (!(state & BMAP_LEFT_CONTIG) ||
2840 (left.br_blockcount + new->br_blockcount +
2841 right.br_blockcount <= MAXEXTLEN)))
2842 state |= BMAP_RIGHT_CONTIG;
2845 * Switch out based on the contiguity flags.
2847 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2848 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2850 * New allocation is contiguous with delayed allocations
2851 * on the left and on the right.
2852 * Merge all three into a single extent record.
2855 temp = left.br_blockcount + new->br_blockcount +
2856 right.br_blockcount;
2858 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2859 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2860 oldlen = startblockval(left.br_startblock) +
2861 startblockval(new->br_startblock) +
2862 startblockval(right.br_startblock);
2863 newlen = xfs_bmap_worst_indlen(ip, temp);
2864 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2865 nullstartblock((int)newlen));
2866 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2868 xfs_iext_remove(ip, *idx + 1, 1, state);
2871 case BMAP_LEFT_CONTIG:
2873 * New allocation is contiguous with a delayed allocation on the left.
2875 * Merge the new allocation with the left neighbor.
2878 temp = left.br_blockcount + new->br_blockcount;
2880 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2881 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2882 oldlen = startblockval(left.br_startblock) +
2883 startblockval(new->br_startblock);
2884 newlen = xfs_bmap_worst_indlen(ip, temp);
2885 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2886 nullstartblock((int)newlen));
2887 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2890 case BMAP_RIGHT_CONTIG:
2892 * New allocation is contiguous with a delayed allocation on the right.
2894 * Merge the new allocation with the right neighbor.
2896 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2897 temp = new->br_blockcount + right.br_blockcount;
2898 oldlen = startblockval(new->br_startblock) +
2899 startblockval(right.br_startblock);
2900 newlen = xfs_bmap_worst_indlen(ip, temp);
2901 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2903 nullstartblock((int)newlen), temp, right.br_state);
2904 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2909 * New allocation is not contiguous with another
2910 * delayed allocation.
2911 * Insert a new entry.
2913 oldlen = newlen = 0;
2914 xfs_iext_insert(ip, *idx, 1, new, state);
2917 if (oldlen != newlen) {
2918 ASSERT(oldlen > newlen);
2919 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2922 * Nothing to do for disk quota accounting here.
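/*
 * Illustrative sketch, not part of the original xfs_bmap.c: when the
 * merge cases above combine delayed extents, the worst-case indirect
 * reservation of the merged extent (newlen) never exceeds the sum of
 * the individual reservations (oldlen), so the difference is handed
 * back to the free-block counter.  A made-up model of that accounting:
 */
static long long example_delay_merge_giveback(long long left_resv,
					      long long new_resv,
					      long long right_resv,
					      long long merged_indlen)
{
	long long oldlen = left_resv + new_resv + right_resv;

	/* the function above asserts oldlen >= merged_indlen */
	return oldlen > merged_indlen ? oldlen - merged_indlen : 0;
}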
2928 * Convert a hole to a real allocation.
2930 STATIC int /* error */
2931 xfs_bmap_add_extent_hole_real(
2932 struct xfs_bmalloca *bma,
2935 struct xfs_bmbt_irec *new = &bma->got;
2936 int error; /* error return value */
2937 int i; /* temp state */
2938 xfs_ifork_t *ifp; /* inode fork pointer */
2939 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2940 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2941 int rval=0; /* return value (logging flags) */
2942 int state; /* state bits, accessed thru macros */
2943 struct xfs_mount *mp;
2945 mp = bma->ip->i_mount;
2946 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
2948 ASSERT(bma->idx >= 0);
2949 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2950 ASSERT(!isnullstartblock(new->br_startblock));
2952 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2954 XFS_STATS_INC(mp, xs_add_exlist);
2957 if (whichfork == XFS_ATTR_FORK)
2958 state |= BMAP_ATTRFORK;
2961 * Check and set flags if this segment has a left neighbor.
2964 state |= BMAP_LEFT_VALID;
2965 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
2966 if (isnullstartblock(left.br_startblock))
2967 state |= BMAP_LEFT_DELAY;
2971 * Check and set flags if this segment has a current value.
2972 * Not true if we're inserting into the "hole" at eof.
2974 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2975 state |= BMAP_RIGHT_VALID;
2976 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
2977 if (isnullstartblock(right.br_startblock))
2978 state |= BMAP_RIGHT_DELAY;
2982 * We're inserting a real allocation between "left" and "right".
2983 * Set the contiguity flags. Don't let extents get too large.
2985 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2986 left.br_startoff + left.br_blockcount == new->br_startoff &&
2987 left.br_startblock + left.br_blockcount == new->br_startblock &&
2988 left.br_state == new->br_state &&
2989 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2990 state |= BMAP_LEFT_CONTIG;
2992 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2993 new->br_startoff + new->br_blockcount == right.br_startoff &&
2994 new->br_startblock + new->br_blockcount == right.br_startblock &&
2995 new->br_state == right.br_state &&
2996 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2997 (!(state & BMAP_LEFT_CONTIG) ||
2998 left.br_blockcount + new->br_blockcount +
2999 right.br_blockcount <= MAXEXTLEN))
3000 state |= BMAP_RIGHT_CONTIG;
3004 * Select which case we're in here, and implement it.
3006 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
3007 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
3009 * New allocation is contiguous with real allocations on the
3010 * left and on the right.
3011 * Merge all three into a single extent record.
3014 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3015 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3016 left.br_blockcount + new->br_blockcount +
3017 right.br_blockcount);
3018 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3020 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
3022 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3023 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
3024 if (bma->cur == NULL) {
3025 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3027 rval = XFS_ILOG_CORE;
3028 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
3029 right.br_startblock, right.br_blockcount,
3033 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3034 error = xfs_btree_delete(bma->cur, &i);
3037 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3038 error = xfs_btree_decrement(bma->cur, 0, &i);
3041 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3042 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3044 left.br_blockcount +
3045 new->br_blockcount +
3046 right.br_blockcount,
3053 case BMAP_LEFT_CONTIG:
3055 * New allocation is contiguous with a real allocation on the left.
3057 * Merge the new allocation with the left neighbor.
3060 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3061 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3062 left.br_blockcount + new->br_blockcount);
3063 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3065 if (bma->cur == NULL) {
3066 rval = xfs_ilog_fext(whichfork);
3069 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3070 left.br_startblock, left.br_blockcount,
3074 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3075 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3077 left.br_blockcount +
3085 case BMAP_RIGHT_CONTIG:
3087 * New allocation is contiguous with a real allocation on the right.
3089 * Merge the new allocation with the right neighbor.
3091 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3092 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3093 new->br_startoff, new->br_startblock,
3094 new->br_blockcount + right.br_blockcount,
3096 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3098 if (bma->cur == NULL) {
3099 rval = xfs_ilog_fext(whichfork);
3102 error = xfs_bmbt_lookup_eq(bma->cur,
3104 right.br_startblock,
3105 right.br_blockcount, &i);
3108 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3109 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3111 new->br_blockcount +
3112 right.br_blockcount,
3121 * New allocation is not contiguous with another real allocation.
3123 * Insert a new entry.
3125 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3126 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3127 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3128 if (bma->cur == NULL) {
3129 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3131 rval = XFS_ILOG_CORE;
3132 error = xfs_bmbt_lookup_eq(bma->cur,
3135 new->br_blockcount, &i);
3138 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3139 bma->cur->bc_rec.b.br_state = new->br_state;
3140 error = xfs_btree_insert(bma->cur, &i);
3143 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3148 /* convert to a btree if necessary */
3149 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3150 int tmp_logflags; /* partial log flag return val */
3152 ASSERT(bma->cur == NULL);
3153 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3154 bma->firstblock, bma->flist, &bma->cur,
3155 0, &tmp_logflags, whichfork);
3156 bma->logflags |= tmp_logflags;
3161 /* clear out the allocated field, done with it now in any case. */
3163 bma->cur->bc_private.b.allocated = 0;
3165 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3167 bma->logflags |= rval;
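/*
 * Illustrative sketch, not part of the original xfs_bmap.c: the
 * contiguity tests above only merge a new real extent with a neighbour
 * when both the file offsets and the disk blocks adjoin, the extent
 * states match, and the combined length stays within MAXEXTLEN.  The
 * left-neighbour test restated with hypothetical names:
 */
static int example_left_contig(unsigned long long left_off,
			       unsigned long long left_bno,
			       unsigned long long left_len,
			       unsigned long long new_off,
			       unsigned long long new_bno,
			       unsigned long long new_len,
			       int left_state, int new_state,
			       unsigned long long max_ext_len)
{
	return left_off + left_len == new_off &&	/* offsets adjoin */
	       left_bno + left_len == new_bno &&	/* blocks adjoin */
	       left_state == new_state &&		/* same extent state */
	       left_len + new_len <= max_ext_len;	/* result not too big */
}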
3172 * Functions used in the extent read, allocate and remove paths
3176 * Adjust the size of the new extent based on di_extsize and rt extsize.
3179 xfs_bmap_extsize_align(
3181 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3182 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3183 xfs_extlen_t extsz, /* align to this extent size */
3184 int rt, /* is this a realtime inode? */
3185 int eof, /* is extent at end-of-file? */
3186 int delay, /* creating delalloc extent? */
3187 int convert, /* overwriting unwritten extent? */
3188 xfs_fileoff_t *offp, /* in/out: aligned offset */
3189 xfs_extlen_t *lenp) /* in/out: aligned length */
3191 xfs_fileoff_t orig_off; /* original offset */
3192 xfs_extlen_t orig_alen; /* original length */
3193 xfs_fileoff_t orig_end; /* original off+len */
3194 xfs_fileoff_t nexto; /* next file offset */
3195 xfs_fileoff_t prevo; /* previous file offset */
3196 xfs_fileoff_t align_off; /* temp for offset */
3197 xfs_extlen_t align_alen; /* temp for length */
3198 xfs_extlen_t temp; /* temp for calculations */
3203 orig_off = align_off = *offp;
3204 orig_alen = align_alen = *lenp;
3205 orig_end = orig_off + orig_alen;
3208 * If this request overlaps an existing extent, then don't
3209 * attempt to perform any additional alignment.
3211 if (!delay && !eof &&
3212 (orig_off >= gotp->br_startoff) &&
3213 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3218 * If the file offset is unaligned vs. the extent size
3219 * we need to align it. This will be possible unless
3220 * the file was previously written with a kernel that didn't
3221 * perform this alignment, or if a truncate shot us in the foot.
3224 temp = do_mod(orig_off, extsz);
3230 /* Same adjustment for the end of the requested area. */
3231 temp = (align_alen % extsz);
3233 align_alen += extsz - temp;
3236 * For large extent hint sizes, the aligned extent might be larger than
3237 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3238 * the length back under MAXEXTLEN. The outer allocation loops handle
3239 * short allocation just fine, so it is safe to do this. We only want to
3240 * do it when we are forced to, though, because it means more allocation
3241 * operations are required.
3243 while (align_alen > MAXEXTLEN)
3244 align_alen -= extsz;
3245 ASSERT(align_alen <= MAXEXTLEN);
3248 * If the previous block overlaps with this proposed allocation
3249 * then move the start forward without adjusting the length.
3251 if (prevp->br_startoff != NULLFILEOFF) {
3252 if (prevp->br_startblock == HOLESTARTBLOCK)
3253 prevo = prevp->br_startoff;
3255 prevo = prevp->br_startoff + prevp->br_blockcount;
3258 if (align_off != orig_off && align_off < prevo)
3261 * If the next block overlaps with this proposed allocation
3262 * then move the start back without adjusting the length,
3263 * but not before offset 0.
3264 * This may of course make the start overlap previous block,
3265 * and if we hit the offset 0 limit then the next block
3266 * can still overlap too.
3268 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3269 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3270 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3271 nexto = gotp->br_startoff + gotp->br_blockcount;
3273 nexto = gotp->br_startoff;
3275 nexto = NULLFILEOFF;
3277 align_off + align_alen != orig_end &&
3278 align_off + align_alen > nexto)
3279 align_off = nexto > align_alen ? nexto - align_alen : 0;
3281 * If we're now overlapping the next or previous extent that
3282 * means we can't fit an extsz piece in this hole. Just move
3283 * the start forward to the first valid spot and set
3284 * the length so we hit the end.
3286 if (align_off != orig_off && align_off < prevo)
3288 if (align_off + align_alen != orig_end &&
3289 align_off + align_alen > nexto &&
3290 nexto != NULLFILEOFF) {
3291 ASSERT(nexto > prevo);
3292 align_alen = nexto - align_off;
3296 * If realtime, and the result isn't a multiple of the realtime
3297 * extent size we need to remove blocks until it is.
3299 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3301 * We're not covering the original request, or
3302 * we won't be able to once we fix the length.
3304 if (orig_off < align_off ||
3305 orig_end > align_off + align_alen ||
3306 align_alen - temp < orig_alen)
3309 * Try to fix it by moving the start up.
3311 if (align_off + temp <= orig_off) {
3316 * Try to fix it by moving the end in.
3318 else if (align_off + align_alen - temp >= orig_end)
3321 * Set the start to the minimum then trim the length.
3324 align_alen -= orig_off - align_off;
3325 align_off = orig_off;
3326 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3329 * Result doesn't cover the request, fail it.
3331 if (orig_off < align_off || orig_end > align_off + align_alen)
3334 ASSERT(orig_off >= align_off);
3335 /* see MAXEXTLEN handling above */
3336 ASSERT(orig_end <= align_off + align_alen ||
3337 align_alen + extsz > MAXEXTLEN);
3341 if (!eof && gotp->br_startoff != NULLFILEOFF)
3342 ASSERT(align_off + align_alen <= gotp->br_startoff);
3343 if (prevp->br_startoff != NULLFILEOFF)
3344 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
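/*
 * Illustrative sketch, not part of the original xfs_bmap.c: the heart
 * of the alignment above rounds the request start down and the length
 * up to multiples of the extent size hint before the neighbour and
 * realtime trimming is applied.  A simplified model with made-up names:
 */
static void example_extsz_round(unsigned long long *off,
				unsigned long long *len,
				unsigned long long extsz)
{
	unsigned long long head = *off % extsz;		/* misalignment at start */
	unsigned long long tail;

	*off -= head;					/* round the start down */
	*len += head;					/* keep the end in place */
	tail = *len % extsz;
	if (tail)
		*len += extsz - tail;			/* round the length up */
}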
3352 #define XFS_ALLOC_GAP_UNITS 4
3356 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3358 xfs_fsblock_t adjust; /* adjustment to block numbers */
3359 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3360 xfs_mount_t *mp; /* mount point structure */
3361 int nullfb; /* true if ap->firstblock isn't set */
3362 int rt; /* true if inode is realtime */
3364 #define ISVALID(x,y) \
3366 (x) < mp->m_sb.sb_rblocks : \
3367 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3368 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3369 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3371 mp = ap->ip->i_mount;
3372 nullfb = *ap->firstblock == NULLFSBLOCK;
3373 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
3374 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3376 * If allocating at eof, and there's a previous real block,
3377 * try to use its last block as our starting point.
3379 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3380 !isnullstartblock(ap->prev.br_startblock) &&
3381 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3382 ap->prev.br_startblock)) {
3383 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3385 * Adjust for the gap between prevp and us.
3387 adjust = ap->offset -
3388 (ap->prev.br_startoff + ap->prev.br_blockcount);
3390 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3391 ap->blkno += adjust;
3394 * If not at eof, then compare the two neighbor blocks.
3395 * Figure out whether either one gives us a good starting point,
3396 * and pick the better one.
3398 else if (!ap->eof) {
3399 xfs_fsblock_t gotbno; /* right side block number */
3400 xfs_fsblock_t gotdiff=0; /* right side difference */
3401 xfs_fsblock_t prevbno; /* left side block number */
3402 xfs_fsblock_t prevdiff=0; /* left side difference */
3405 * If there's a previous (left) block, select a requested
3406 * start block based on it.
3408 if (ap->prev.br_startoff != NULLFILEOFF &&
3409 !isnullstartblock(ap->prev.br_startblock) &&
3410 (prevbno = ap->prev.br_startblock +
3411 ap->prev.br_blockcount) &&
3412 ISVALID(prevbno, ap->prev.br_startblock)) {
3414 * Calculate gap to end of previous block.
3416 adjust = prevdiff = ap->offset -
3417 (ap->prev.br_startoff +
3418 ap->prev.br_blockcount);
3420 * Figure the startblock based on the previous block's
3421 * end and the gap size.
3423 * If the gap is large relative to the piece we're
3424 * allocating, or using it gives us an invalid block
3425 * number, then just use the end of the previous block.
3427 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3428 ISVALID(prevbno + prevdiff,
3429 ap->prev.br_startblock))
3434 * If the firstblock forbids it, can't use it, must use default.
3437 if (!rt && !nullfb &&
3438 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3439 prevbno = NULLFSBLOCK;
3442 * No previous block or can't follow it, just default.
3445 prevbno = NULLFSBLOCK;
3447 * If there's a following (right) block, select a requested
3448 * start block based on it.
3450 if (!isnullstartblock(ap->got.br_startblock)) {
3452 * Calculate gap to start of next block.
3454 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3456 * Figure the startblock based on the next block's
3457 * start and the gap size.
3459 gotbno = ap->got.br_startblock;
3462 * If the gap is large relative to the piece we're
3463 * allocating, or using it gives us an invalid block
3464 * number, then just use the start of the next block
3465 * offset by our length.
3467 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3468 ISVALID(gotbno - gotdiff, gotbno))
3470 else if (ISVALID(gotbno - ap->length, gotbno)) {
3471 gotbno -= ap->length;
3472 gotdiff += adjust - ap->length;
3476 * If the firstblock forbids it, can't use it, must use default.
3479 if (!rt && !nullfb &&
3480 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3481 gotbno = NULLFSBLOCK;
3484 * No next block, just default.
3487 gotbno = NULLFSBLOCK;
3489 * If both valid, pick the better one, else the only good
3490 * one, else ap->blkno is already set (to 0 or the inode block).
3492 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3493 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3494 else if (prevbno != NULLFSBLOCK)
3495 ap->blkno = prevbno;
3496 else if (gotbno != NULLFSBLOCK)
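/*
 * Illustrative sketch, not part of the original xfs_bmap.c: when both
 * neighbours offer a usable start block, the code above picks the one
 * whose gap from the requested offset is smaller; a candidate is only
 * considered at all if its gap is within XFS_ALLOC_GAP_UNITS times the
 * allocation length.  A made-up model of the final choice:
 */
static unsigned long long example_pick_adjacent(unsigned long long prevbno,
						unsigned long long prevdiff,
						unsigned long long gotbno,
						unsigned long long gotdiff,
						unsigned long long nullfsb)
{
	if (prevbno != nullfsb && gotbno != nullfsb)
		return prevdiff <= gotdiff ? prevbno : gotbno;
	if (prevbno != nullfsb)
		return prevbno;
	return gotbno;	/* may still be nullfsb; caller then keeps ap->blkno */
}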
3503 xfs_bmap_longest_free_extent(
3504 struct xfs_trans *tp,
3509 struct xfs_mount *mp = tp->t_mountp;
3510 struct xfs_perag *pag;
3511 xfs_extlen_t longest;
3514 pag = xfs_perag_get(mp, ag);
3515 if (!pag->pagf_init) {
3516 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3520 if (!pag->pagf_init) {
3526 longest = xfs_alloc_longest_free_extent(mp, pag,
3527 xfs_alloc_min_freelist(mp, pag));
3528 if (*blen < longest)
3537 xfs_bmap_select_minlen(
3538 struct xfs_bmalloca *ap,
3539 struct xfs_alloc_arg *args,
3543 if (notinit || *blen < ap->minlen) {
3545 * Since we did a BUF_TRYLOCK above, it is possible that
3546 * there is space for this request.
3548 args->minlen = ap->minlen;
3549 } else if (*blen < args->maxlen) {
3551 * If the best seen length is less than the request length,
3552 * use the best as the minimum.
3554 args->minlen = *blen;
3557 * Otherwise we've seen an extent as big as maxlen, use that as the minimum.
3560 args->minlen = args->maxlen;
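/*
 * Illustrative sketch, not part of the original xfs_bmap.c: the minlen
 * selection above clamps the allocator's minimum request to the longest
 * free extent seen while scanning AGs, falling back to the caller's
 * minimum when the scan was inconclusive.  Restated with made-up names:
 */
static unsigned long long example_select_minlen(int notinit,
						unsigned long long best_free,
						unsigned long long want_min,
						unsigned long long want_max)
{
	if (notinit || best_free < want_min)
		return want_min;	/* scan inconclusive: ask for the bare minimum */
	if (best_free < want_max)
		return best_free;	/* best the fs can do, ask for exactly that */
	return want_max;		/* plenty of space, ask for the full request */
}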
3565 xfs_bmap_btalloc_nullfb(
3566 struct xfs_bmalloca *ap,
3567 struct xfs_alloc_arg *args,
3570 struct xfs_mount *mp = ap->ip->i_mount;
3571 xfs_agnumber_t ag, startag;
3575 args->type = XFS_ALLOCTYPE_START_BNO;
3576 args->total = ap->total;
3578 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3579 if (startag == NULLAGNUMBER)
3582 while (*blen < args->maxlen) {
3583 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3588 if (++ag == mp->m_sb.sb_agcount)
3594 xfs_bmap_select_minlen(ap, args, blen, notinit);
3599 xfs_bmap_btalloc_filestreams(
3600 struct xfs_bmalloca *ap,
3601 struct xfs_alloc_arg *args,
3604 struct xfs_mount *mp = ap->ip->i_mount;
3609 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3610 args->total = ap->total;
3612 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3613 if (ag == NULLAGNUMBER)
3616 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3620 if (*blen < args->maxlen) {
3621 error = xfs_filestream_new_ag(ap, &ag);
3625 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3632 xfs_bmap_select_minlen(ap, args, blen, notinit);
3635 * Set the failure fallback case to look in the selected AG as the stream may have moved.
3638 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3644 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3646 xfs_mount_t *mp; /* mount point structure */
3647 xfs_alloctype_t atype = 0; /* type for allocation routines */
3648 xfs_extlen_t align; /* minimum allocation alignment */
3649 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3651 xfs_alloc_arg_t args;
3653 xfs_extlen_t nextminlen = 0;
3654 int nullfb; /* true if ap->firstblock isn't set */
3662 mp = ap->ip->i_mount;
3664 /* stripe alignment for allocation is determined by mount parameters */
3666 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3667 stripe_align = mp->m_swidth;
3668 else if (mp->m_dalign)
3669 stripe_align = mp->m_dalign;
3671 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
3672 if (unlikely(align)) {
3673 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3674 align, 0, ap->eof, 0, ap->conv,
3675 &ap->offset, &ap->length);
3681 nullfb = *ap->firstblock == NULLFSBLOCK;
3682 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3684 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
3685 ag = xfs_filestream_lookup_ag(ap->ip);
3686 ag = (ag != NULLAGNUMBER) ? ag : 0;
3687 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3689 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3692 ap->blkno = *ap->firstblock;
3694 xfs_bmap_adjacent(ap);
3697 * If allowed, use ap->blkno; otherwise must use firstblock since
3698 * it's in the right allocation group.
3700 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3703 ap->blkno = *ap->firstblock;
3705 * Normal allocation, done through xfs_alloc_vextent.
3707 tryagain = isaligned = 0;
3708 memset(&args, 0, sizeof(args));
3711 args.fsbno = ap->blkno;
3713 /* Trim the allocation back to the maximum an AG can fit. */
3714 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
3715 args.firstblock = *ap->firstblock;
3719 * Search for an allocation group with a single extent large
3720 * enough for the request. If one isn't found, then adjust
3721 * the minimum allocation size to the largest space found.
3723 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
3724 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3726 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3729 } else if (ap->flist->xbf_low) {
3730 if (xfs_inode_is_filestream(ap->ip))
3731 args.type = XFS_ALLOCTYPE_FIRST_AG;
3733 args.type = XFS_ALLOCTYPE_START_BNO;
3734 args.total = args.minlen = ap->minlen;
3736 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3737 args.total = ap->total;
3738 args.minlen = ap->minlen;
3740 /* apply extent size hints if obtained earlier */
3741 if (unlikely(align)) {
3743 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3744 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3745 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3749 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3750 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3751 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3754 * If we are not low on available data blocks, and the
3755 * underlying logical volume manager is a stripe, and
3756 * the file offset is zero then try to allocate data
3757 * blocks on stripe unit boundary.
3758 * NOTE: ap->aeof is only set if the allocation length
3759 * is >= the stripe unit and the allocation offset is
3760 * at the end of file.
3762 if (!ap->flist->xbf_low && ap->aeof) {
3764 args.alignment = stripe_align;
3768 * Adjust for alignment
3770 if (blen > args.alignment && blen <= args.maxlen)
3771 args.minlen = blen - args.alignment;
3772 args.minalignslop = 0;
3775 * First try an exact bno allocation.
3776 * If it fails then do a near or start bno
3777 * allocation with alignment turned on.
3781 args.type = XFS_ALLOCTYPE_THIS_BNO;
3784 * Compute the minlen+alignment for the
3785 * next case. Set slop so that the value
3786 * of minlen+alignment+slop doesn't go up
3787 * between the calls.
3789 if (blen > stripe_align && blen <= args.maxlen)
3790 nextminlen = blen - stripe_align;
3792 nextminlen = args.minlen;
3793 if (nextminlen + stripe_align > args.minlen + 1)
3795 nextminlen + stripe_align -
3798 args.minalignslop = 0;
3802 args.minalignslop = 0;
3804 args.minleft = ap->minleft;
3805 args.wasdel = ap->wasdel;
3807 args.userdata = ap->userdata;
3808 if (ap->userdata & XFS_ALLOC_USERDATA_ZERO)
3811 error = xfs_alloc_vextent(&args);
3815 if (tryagain && args.fsbno == NULLFSBLOCK) {
3817 * Exact allocation failed. Now try with alignment turned on.
3821 args.fsbno = ap->blkno;
3822 args.alignment = stripe_align;
3823 args.minlen = nextminlen;
3824 args.minalignslop = 0;
3826 if ((error = xfs_alloc_vextent(&args)))
3829 if (isaligned && args.fsbno == NULLFSBLOCK) {
3831 * allocation failed, so turn off alignment and try again.
3835 args.fsbno = ap->blkno;
3837 if ((error = xfs_alloc_vextent(&args)))
3840 if (args.fsbno == NULLFSBLOCK && nullfb &&
3841 args.minlen > ap->minlen) {
3842 args.minlen = ap->minlen;
3843 args.type = XFS_ALLOCTYPE_START_BNO;
3844 args.fsbno = ap->blkno;
3845 if ((error = xfs_alloc_vextent(&args)))
3848 if (args.fsbno == NULLFSBLOCK && nullfb) {
3850 args.type = XFS_ALLOCTYPE_FIRST_AG;
3851 args.total = ap->minlen;
3853 if ((error = xfs_alloc_vextent(&args)))
3855 ap->flist->xbf_low = 1;
3857 if (args.fsbno != NULLFSBLOCK) {
3859 * check the allocation happened at the same or higher AG than
3860 * the first block that was allocated.
3862 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3863 XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
3864 XFS_FSB_TO_AGNO(mp, args.fsbno) ||
3865 (ap->flist->xbf_low &&
3866 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
3867 XFS_FSB_TO_AGNO(mp, args.fsbno)));
3869 ap->blkno = args.fsbno;
3870 if (*ap->firstblock == NULLFSBLOCK)
3871 *ap->firstblock = args.fsbno;
3872 ASSERT(nullfb || fb_agno == args.agno ||
3873 (ap->flist->xbf_low && fb_agno < args.agno));
3874 ap->length = args.len;
3875 ap->ip->i_d.di_nblocks += args.len;
3876 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3878 ap->ip->i_delayed_blks -= args.len;
3880 * Adjust the disk quota also. This was reserved earlier.
3883 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3884 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3885 XFS_TRANS_DQ_BCOUNT,
3888 ap->blkno = NULLFSBLOCK;
3895 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3896 * It figures out where to ask the underlying allocator to put the new extent.
3900 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3902 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
3903 return xfs_bmap_rtalloc(ap);
3904 return xfs_bmap_btalloc(ap);
3908 * Trim the returned map to the required bounds
3912 struct xfs_bmbt_irec *mval,
3913 struct xfs_bmbt_irec *got,
3921 if ((flags & XFS_BMAPI_ENTIRE) ||
3922 got->br_startoff + got->br_blockcount <= obno) {
3924 if (isnullstartblock(got->br_startblock))
3925 mval->br_startblock = DELAYSTARTBLOCK;
3931 ASSERT((*bno >= obno) || (n == 0));
3933 mval->br_startoff = *bno;
3934 if (isnullstartblock(got->br_startblock))
3935 mval->br_startblock = DELAYSTARTBLOCK;
3937 mval->br_startblock = got->br_startblock +
3938 (*bno - got->br_startoff);
3940 * Return the minimum of what we got and what we asked for as
3941 * the length. We can use the len variable here because it is
3942 * modified below and we could have been there before coming
3943 * here if the first part of the allocation didn't overlap what was asked for.
3946 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3947 got->br_blockcount - (*bno - got->br_startoff));
3948 mval->br_state = got->br_state;
3949 ASSERT(mval->br_blockcount <= len);
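/*
 * Illustrative sketch, not part of the original xfs_bmap.c: trimming a
 * found extent to the caller's [bno, end) window above clips the start
 * to bno and the length to whichever runs out first, the extent or the
 * remaining request.  A simplified model with hypothetical names:
 */
static void example_trim_map(unsigned long long got_off,
			     unsigned long long got_len,
			     unsigned long long bno,
			     unsigned long long end,
			     unsigned long long *map_off,
			     unsigned long long *map_len)
{
	unsigned long long ext_left = got_len - (bno - got_off);
	unsigned long long req_left = end - bno;

	*map_off = bno;						/* clip the start */
	*map_len = ext_left < req_left ? ext_left : req_left;	/* clip the length */
}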
3954 * Update and validate the extent map to return
3957 xfs_bmapi_update_map(
3958 struct xfs_bmbt_irec **map,
3966 xfs_bmbt_irec_t *mval = *map;
3968 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3969 ((mval->br_startoff + mval->br_blockcount) <= end));
3970 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3971 (mval->br_startoff < obno));
3973 *bno = mval->br_startoff + mval->br_blockcount;
3975 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3976 /* update previous map with new information */
3977 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3978 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3979 ASSERT(mval->br_state == mval[-1].br_state);
3980 mval[-1].br_blockcount = mval->br_blockcount;
3981 mval[-1].br_state = mval->br_state;
3982 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3983 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3984 mval[-1].br_startblock != HOLESTARTBLOCK &&
3985 mval->br_startblock == mval[-1].br_startblock +
3986 mval[-1].br_blockcount &&
3987 ((flags & XFS_BMAPI_IGSTATE) ||
3988 mval[-1].br_state == mval->br_state)) {
3989 ASSERT(mval->br_startoff ==
3990 mval[-1].br_startoff + mval[-1].br_blockcount);
3991 mval[-1].br_blockcount += mval->br_blockcount;
3992 } else if (*n > 0 &&
3993 mval->br_startblock == DELAYSTARTBLOCK &&
3994 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3995 mval->br_startoff ==
3996 mval[-1].br_startoff + mval[-1].br_blockcount) {
3997 mval[-1].br_blockcount += mval->br_blockcount;
3998 mval[-1].br_state = mval->br_state;
3999 } else if (!((*n == 0) &&
4000 ((mval->br_startoff + mval->br_blockcount) <=
4009 * Map file blocks to filesystem blocks without allocation.
4013 struct xfs_inode *ip,
4016 struct xfs_bmbt_irec *mval,
4020 struct xfs_mount *mp = ip->i_mount;
4021 struct xfs_ifork *ifp;
4022 struct xfs_bmbt_irec got;
4023 struct xfs_bmbt_irec prev;
4030 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4031 XFS_ATTR_FORK : XFS_DATA_FORK;
4034 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4035 XFS_BMAPI_IGSTATE)));
4036 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4038 if (unlikely(XFS_TEST_ERROR(
4039 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4040 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4041 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4042 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4043 return -EFSCORRUPTED;
4046 if (XFS_FORCED_SHUTDOWN(mp))
4049 XFS_STATS_INC(mp, xs_blk_mapr);
4051 ifp = XFS_IFORK_PTR(ip, whichfork);
4053 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4054 error = xfs_iread_extents(NULL, ip, whichfork);
4059 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4063 while (bno < end && n < *nmap) {
4064 /* Reading past eof, act as though there's a hole up to end. */
4066 got.br_startoff = end;
4067 if (got.br_startoff > bno) {
4068 /* Reading in a hole. */
4069 mval->br_startoff = bno;
4070 mval->br_startblock = HOLESTARTBLOCK;
4071 mval->br_blockcount =
4072 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4073 mval->br_state = XFS_EXT_NORM;
4074 bno += mval->br_blockcount;
4075 len -= mval->br_blockcount;
4081 /* set up the extent map to return. */
4082 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4083 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4085 /* If we're done, stop now. */
4086 if (bno >= end || n >= *nmap)
4089 /* Else go on to the next record. */
4090 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4091 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4100 xfs_bmapi_reserve_delalloc(
4101 struct xfs_inode *ip,
4104 struct xfs_bmbt_irec *got,
4105 struct xfs_bmbt_irec *prev,
4106 xfs_extnum_t *lastx,
4109 struct xfs_mount *mp = ip->i_mount;
4110 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4112 xfs_extlen_t indlen;
4113 char rt = XFS_IS_REALTIME_INODE(ip);
4117 alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
4119 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4121 /* Figure out the extent size, adjust alen */
4122 extsz = xfs_get_extsz_hint(ip);
4124 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4125 1, 0, &aoff, &alen);
4130 extsz = alen / mp->m_sb.sb_rextsize;
4133 * Make a transaction-less quota reservation for delayed allocation
4134 * blocks. This number gets adjusted later. We return if we haven't
4135 * allocated blocks already inside this loop.
4137 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4138 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4143 * Split changing sb for alen and indlen since they could be coming
4144 * from different places.
4146 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4150 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4152 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4156 goto out_unreserve_quota;
4158 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4160 goto out_unreserve_blocks;
4163 ip->i_delayed_blks += alen;
4165 got->br_startoff = aoff;
4166 got->br_startblock = nullstartblock(indlen);
4167 got->br_blockcount = alen;
4168 got->br_state = XFS_EXT_NORM;
4169 xfs_bmap_add_extent_hole_delay(ip, lastx, got);
4172 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4173 * might have merged it into one of the neighbouring ones.
4175 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4177 ASSERT(got->br_startoff <= aoff);
4178 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4179 ASSERT(isnullstartblock(got->br_startblock));
4180 ASSERT(got->br_state == XFS_EXT_NORM);
4183 out_unreserve_blocks:
4185 xfs_mod_frextents(mp, extsz);
4187 xfs_mod_fdblocks(mp, alen, false);
4188 out_unreserve_quota:
4189 if (XFS_IS_QUOTA_ON(mp))
4190 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4191 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
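/*
 * Illustrative sketch, not part of the original xfs_bmap.c: the
 * delalloc reservation above is taken in two pieces, the data blocks
 * themselves (alen) and the worst-case indirect blocks for a later
 * bmap btree (indlen), so that failure of the second piece can hand
 * the first one back cleanly.  A made-up model of that ordering:
 */
static int example_reserve_delalloc(long long *free_blocks,
				    long long alen, long long indlen)
{
	if (*free_blocks < alen)
		return -1;			/* nothing reserved yet */
	*free_blocks -= alen;			/* data block reservation */
	if (*free_blocks < indlen) {
		*free_blocks += alen;		/* undo the first piece */
		return -1;
	}
	*free_blocks -= indlen;			/* indirect block reservation */
	return 0;
}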
4196 * Map file blocks to filesystem blocks, adding delayed allocations as needed.
4200 struct xfs_inode *ip, /* incore inode */
4201 xfs_fileoff_t bno, /* starting file offs. mapped */
4202 xfs_filblks_t len, /* length to map in file */
4203 struct xfs_bmbt_irec *mval, /* output: map values */
4204 int *nmap, /* i/o: mval size/count */
4205 int flags) /* XFS_BMAPI_... */
4207 struct xfs_mount *mp = ip->i_mount;
4208 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4209 struct xfs_bmbt_irec got; /* current file extent record */
4210 struct xfs_bmbt_irec prev; /* previous file extent record */
4211 xfs_fileoff_t obno; /* old block number (offset) */
4212 xfs_fileoff_t end; /* end of mapped file region */
4213 xfs_extnum_t lastx; /* last useful extent number */
4214 int eof; /* we've hit the end of extents */
4215 int n = 0; /* current extent index */
4219 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4220 ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
4221 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4223 if (unlikely(XFS_TEST_ERROR(
4224 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4225 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4226 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4227 XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
4228 return -EFSCORRUPTED;
4231 if (XFS_FORCED_SHUTDOWN(mp))
4234 XFS_STATS_INC(mp, xs_blk_mapw);
4236 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4237 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4242 xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
4246 while (bno < end && n < *nmap) {
4247 if (eof || got.br_startoff > bno) {
4248 error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
4249 &prev, &lastx, eof);
4259 /* set up the extent map to return. */
4260 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4261 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4263 /* If we're done, stop now. */
4264 if (bno >= end || n >= *nmap)
4267 /* Else go on to the next record. */
4269 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4270 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4282 struct xfs_bmalloca *bma)
4284 struct xfs_mount *mp = bma->ip->i_mount;
4285 int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
4286 XFS_ATTR_FORK : XFS_DATA_FORK;
4287 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4288 int tmp_logflags = 0;
4291 ASSERT(bma->length > 0);
4294 * For the wasdelay case, we could also just allocate the stuff asked
4295 * for in this bmap call but that wouldn't be as good.
4298 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4299 bma->offset = bma->got.br_startoff;
4300 if (bma->idx != NULLEXTNUM && bma->idx) {
4301 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4305 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4307 bma->length = XFS_FILBLKS_MIN(bma->length,
4308 bma->got.br_startoff - bma->offset);
4312 * Indicate if this is the first user data in the file, or just any
4313 * user data. And if it is userdata, indicate whether it needs to
4314 * be initialised to zero during allocation.
4316 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4317 bma->userdata = (bma->offset == 0) ?
4318 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
4319 if (bma->flags & XFS_BMAPI_ZERO)
4320 bma->userdata |= XFS_ALLOC_USERDATA_ZERO;
4323 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4326 * Only want to do the alignment at the eof if it is userdata and
4327 * allocation length is larger than a stripe unit.
4329 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4330 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4331 error = xfs_bmap_isaeof(bma, whichfork);
4336 error = xfs_bmap_alloc(bma);
4340 if (bma->flist->xbf_low)
4343 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4344 if (bma->blkno == NULLFSBLOCK)
4346 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4347 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4348 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4349 bma->cur->bc_private.b.flist = bma->flist;
4352 * Bump the number of extents we've allocated in this call.
4358 bma->cur->bc_private.b.flags =
4359 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4361 bma->got.br_startoff = bma->offset;
4362 bma->got.br_startblock = bma->blkno;
4363 bma->got.br_blockcount = bma->length;
4364 bma->got.br_state = XFS_EXT_NORM;
4367 * A wasdelay extent has been initialized, so shouldn't be flagged as unwritten.
4370 if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
4371 xfs_sb_version_hasextflgbit(&mp->m_sb))
4372 bma->got.br_state = XFS_EXT_UNWRITTEN;
4375 error = xfs_bmap_add_extent_delay_real(bma);
4377 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4379 bma->logflags |= tmp_logflags;
4384 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4385 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4386 * the neighbouring ones.
4388 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4390 ASSERT(bma->got.br_startoff <= bma->offset);
4391 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4392 bma->offset + bma->length);
4393 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4394 bma->got.br_state == XFS_EXT_UNWRITTEN);
4399 xfs_bmapi_convert_unwritten(
4400 struct xfs_bmalloca *bma,
4401 struct xfs_bmbt_irec *mval,
4405 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4406 XFS_ATTR_FORK : XFS_DATA_FORK;
4407 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4408 int tmp_logflags = 0;
4411 /* check if we need to do unwritten->real conversion */
4412 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4413 (flags & XFS_BMAPI_PREALLOC))
4416 /* check if we need to do real->unwritten conversion */
4417 if (mval->br_state == XFS_EXT_NORM &&
4418 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4419 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4423 * Modify (by adding) the state flag, if writing.
4425 ASSERT(mval->br_blockcount <= len);
4426 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4427 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4428 bma->ip, whichfork);
4429 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4430 bma->cur->bc_private.b.flist = bma->flist;
4432 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4433 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4436 * Before insertion into the bmbt, zero the range being converted if required.
4439 if (flags & XFS_BMAPI_ZERO) {
4440 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4441 mval->br_blockcount);
4446 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
4447 &bma->cur, mval, bma->firstblock, bma->flist,
4450 * Log the inode core unconditionally in the unwritten extent conversion
4451 * path because the conversion might not have done so (e.g., if the
4452 * extent count hasn't changed). We need to make sure the inode is dirty
4453 * in the transaction for the sake of fsync(), even if nothing has
4454 * changed, because fsync() will not force the log for this transaction
4455 * unless it sees the inode pinned.
4457 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4462 * Update our extent pointer, given that
4463 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4464 * of the neighbouring ones.
4466 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4469 * We may have combined previously unwritten space with written space,
4470 * so generate another request.
4472 if (mval->br_blockcount < len)
4478 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4479 * extent state if necessary. Detailed behaviour is controlled by the flags
4480 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4483 * The returned value in "firstblock" from the first call in a transaction
4484 * must be remembered and presented to subsequent calls in "firstblock".
4485 * An upper bound for the number of blocks to be allocated is supplied to
4486 * the first call in "total"; if no allocation group has that many free
4487 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
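 *
 * A typical caller sequence looks roughly like the pattern used by
 * xfs_bmap_split_extent() near the end of this file:
 *
 *	xfs_bmap_init(&flist, &firstblock);
 *	error = xfs_bmapi_write(tp, ip, bno, len, flags, &firstblock,
 *				total, mval, &nmap, &flist);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &flist, NULL);
 *	if (!error)
 *		error = xfs_trans_commit(tp);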
4491 struct xfs_trans *tp, /* transaction pointer */
4492 struct xfs_inode *ip, /* incore inode */
4493 xfs_fileoff_t bno, /* starting file offs. mapped */
4494 xfs_filblks_t len, /* length to map in file */
4495 int flags, /* XFS_BMAPI_... */
4496 xfs_fsblock_t *firstblock, /* first allocated block
4497 controls a.g. for allocs */
4498 xfs_extlen_t total, /* total blocks needed */
4499 struct xfs_bmbt_irec *mval, /* output: map values */
4500 int *nmap, /* i/o: mval size/count */
4501 struct xfs_bmap_free *flist) /* i/o: list extents to free */
4503 struct xfs_mount *mp = ip->i_mount;
4504 struct xfs_ifork *ifp;
4505 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4506 xfs_fileoff_t end; /* end of mapped file region */
4507 int eof; /* after the end of extents */
4508 int error; /* error return */
4509 int n; /* current extent index */
4510 xfs_fileoff_t obno; /* old block number (offset) */
4511 int whichfork; /* data or attr fork */
4512 char inhole; /* current location is hole in file */
4513 char wasdelay; /* old extent was delayed */
4516 xfs_fileoff_t orig_bno; /* original block number value */
4517 int orig_flags; /* original flags arg value */
4518 xfs_filblks_t orig_len; /* original value of len arg */
4519 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4520 int orig_nmap; /* original value of *nmap */
4528 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4529 XFS_ATTR_FORK : XFS_DATA_FORK;
4532 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4533 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4536 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4537 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4539 /* zeroing is currently only for data extents, not metadata */
4540 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4541 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4543 * we can allocate unwritten extents or pre-zero allocated blocks,
4544 * but it makes no sense to do both at once. This would result in
4545 * zeroing the unwritten extent twice, while it would still remain
4546 * an unwritten extent afterwards.
4548 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4549 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4551 if (unlikely(XFS_TEST_ERROR(
4552 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4553 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4554 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4555 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4556 return -EFSCORRUPTED;
4559 if (XFS_FORCED_SHUTDOWN(mp))
4562 ifp = XFS_IFORK_PTR(ip, whichfork);
4564 XFS_STATS_INC(mp, xs_blk_mapw);
4566 if (*firstblock == NULLFSBLOCK) {
4567 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4568 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4575 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4576 error = xfs_iread_extents(tp, ip, whichfork);
4581 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4592 bma.firstblock = firstblock;
4594 while (bno < end && n < *nmap) {
4595 inhole = eof || bma.got.br_startoff > bno;
4596 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4599 * First, deal with the hole before the allocated space
4600 * that we found, if any.
4602 if (inhole || wasdelay) {
4604 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4605 bma.wasdel = wasdelay;
4610 * There's a 32/64 bit type mismatch between the
4611 * allocation length request (which can be 64 bits in
4612 * length) and the bma length request, which is
4613 * xfs_extlen_t and therefore 32 bits. Hence we have to
4614 * check for 32-bit overflows and handle them here.
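 *
 * MAXEXTLEN is 2^21 - 1 filesystem blocks, so e.g. a request for 2^21
 * blocks is clamped here and the remainder is mapped by a later pass
 * around the loop below.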
4616 if (len > (xfs_filblks_t)MAXEXTLEN)
4617 bma.length = MAXEXTLEN;
4622 ASSERT(bma.length > 0);
4623 error = xfs_bmapi_allocate(&bma);
4626 if (bma.blkno == NULLFSBLOCK)
4630 /* Deal with the allocated space we found. */
4631 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4634 /* Execute unwritten extent conversion if necessary */
4635 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4636 if (error == -EAGAIN)
4641 /* update the extent map to return */
4642 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4645 * If we're done, stop now. Stop when we've allocated
4646 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4647 * the transaction may get too big.
4649 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4652 /* Else go on to the next record. */
4654 if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
4655 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4663 * Transform from btree to extents, give it cur.
4665 if (xfs_bmap_wants_extents(ip, whichfork)) {
4666 int tmp_logflags = 0;
4669 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4670 &tmp_logflags, whichfork);
4671 bma.logflags |= tmp_logflags;
4676 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4677 XFS_IFORK_NEXTENTS(ip, whichfork) >
4678 XFS_IFORK_MAXEXT(ip, whichfork));
4682 * Log everything. Do this after conversion, there's no point in
4683 * logging the extent records if we've converted to btree format.
4685 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4686 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4687 bma.logflags &= ~xfs_ilog_fext(whichfork);
4688 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4689 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4690 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4692 * Log whatever the flags say, even if error. Otherwise we might miss
4693 * detecting a case where the data is changed, there's an error,
4694 * and it's not logged so we don't shut down when we should.
4697 xfs_trans_log_inode(tp, ip, bma.logflags);
4701 ASSERT(*firstblock == NULLFSBLOCK ||
4702 XFS_FSB_TO_AGNO(mp, *firstblock) ==
4704 bma.cur->bc_private.b.firstblock) ||
4706 XFS_FSB_TO_AGNO(mp, *firstblock) <
4708 bma.cur->bc_private.b.firstblock)));
4709 *firstblock = bma.cur->bc_private.b.firstblock;
4711 xfs_btree_del_cursor(bma.cur,
4712 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4715 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4721 * When a delalloc extent is split (e.g., due to a hole punch), the original
4722 * indlen reservation must be shared across the two new extents that are left behind.
4725 * Given the original reservation and the worst case indlen for the two new
4726 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4727 * reservation fairly across the two new extents. If necessary, steal available
4728 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4729 * ores == 1). The number of stolen blocks is returned. The availability and
4730 * subsequent accounting of stolen blocks is the responsibility of the caller.
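 *
 * For example (illustrative numbers only): with ores = 3, worst-case
 * indlens of 3 and 2 (nres = 5) and avail = 1, one block is stolen
 * (stolen = 1) and one more is skimmed off a new reservation, leaving
 * indlen1 + indlen2 == ores + stolen == 4.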
4732 static xfs_filblks_t
4733 xfs_bmap_split_indlen(
4734 xfs_filblks_t ores, /* original res. */
4735 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4736 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4737 xfs_filblks_t avail) /* stealable blocks */
4739 xfs_filblks_t len1 = *indlen1;
4740 xfs_filblks_t len2 = *indlen2;
4741 xfs_filblks_t nres = len1 + len2; /* new total res. */
4742 xfs_filblks_t stolen = 0;
4745 * Steal as many blocks as we can to try and satisfy the worst case
4746 * indlen for both new extents.
4748 while (nres > ores && avail) {
4755 * The only blocks available are those reserved for the original
4756 * extent and what we can steal from the extent being removed.
4757 * If this still isn't enough to satisfy the combined
4758 * requirements for the two new extents, skim blocks off of each
4759 * of the new reservations until they match what is available.
4761 while (nres > ores) {
4781 * Called by xfs_bmapi to update file extent records and the btree
4782 * after removing space (or undoing a delayed allocation).
4784 STATIC int /* error */
4785 xfs_bmap_del_extent(
4786 xfs_inode_t *ip, /* incore inode pointer */
4787 xfs_trans_t *tp, /* current transaction pointer */
4788 xfs_extnum_t *idx, /* extent number to update/delete */
4789 xfs_bmap_free_t *flist, /* list of extents to be freed */
4790 xfs_btree_cur_t *cur, /* if null, not a btree */
4791 xfs_bmbt_irec_t *del, /* data to remove from extents */
4792 int *logflagsp, /* inode logging flags */
4793 int whichfork) /* data or attr fork */
4795 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
4796 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
4797 xfs_fsblock_t del_endblock=0; /* first block past del */
4798 xfs_fileoff_t del_endoff; /* first offset past del */
4799 int delay; /* current block is delayed allocated */
4800 int do_fx; /* free extent at end of routine */
4801 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
4802 int error; /* error return value */
4803 int flags; /* inode logging flags */
4804 xfs_bmbt_irec_t got; /* current extent entry */
4805 xfs_fileoff_t got_endoff; /* first offset past got */
4806 int i; /* temp state */
4807 xfs_ifork_t *ifp; /* inode fork pointer */
4808 xfs_mount_t *mp; /* mount structure */
4809 xfs_filblks_t nblks; /* quota/sb block count */
4810 xfs_bmbt_irec_t new; /* new record to be inserted */
4812 uint qfield; /* quota field to update */
4813 xfs_filblks_t temp; /* for indirect length calculations */
4814 xfs_filblks_t temp2; /* for indirect length calculations */
4818 XFS_STATS_INC(mp, xs_del_exlist);
4820 if (whichfork == XFS_ATTR_FORK)
4821 state |= BMAP_ATTRFORK;
4823 ifp = XFS_IFORK_PTR(ip, whichfork);
4824 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
4825 (uint)sizeof(xfs_bmbt_rec_t)));
4826 ASSERT(del->br_blockcount > 0);
4827 ep = xfs_iext_get_ext(ifp, *idx);
4828 xfs_bmbt_get_all(ep, &got);
4829 ASSERT(got.br_startoff <= del->br_startoff);
4830 del_endoff = del->br_startoff + del->br_blockcount;
4831 got_endoff = got.br_startoff + got.br_blockcount;
4832 ASSERT(got_endoff >= del_endoff);
4833 delay = isnullstartblock(got.br_startblock);
4834 ASSERT(isnullstartblock(del->br_startblock) == delay);
4839 * If deleting a real allocation, must free up the disk space.
4842 flags = XFS_ILOG_CORE;
4844 * Realtime allocation. Free it and record di_nblocks update.
4846 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4850 ASSERT(do_mod(del->br_blockcount,
4851 mp->m_sb.sb_rextsize) == 0);
4852 ASSERT(do_mod(del->br_startblock,
4853 mp->m_sb.sb_rextsize) == 0);
4854 bno = del->br_startblock;
4855 len = del->br_blockcount;
4856 do_div(bno, mp->m_sb.sb_rextsize);
4857 do_div(len, mp->m_sb.sb_rextsize);
4858 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4862 nblks = len * mp->m_sb.sb_rextsize;
4863 qfield = XFS_TRANS_DQ_RTBCOUNT;
4866 * Ordinary allocation.
4870 nblks = del->br_blockcount;
4871 qfield = XFS_TRANS_DQ_BCOUNT;
4874 * Set up del_endblock and cur for later.
4876 del_endblock = del->br_startblock + del->br_blockcount;
4878 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
4879 got.br_startblock, got.br_blockcount,
4882 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4884 da_old = da_new = 0;
4886 da_old = startblockval(got.br_startblock);
4892 * Set flag value to use in switch statement.
4893 * Left-contig is 2, right-contig is 1.
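 * The resulting cases are: 3 - the whole extent is deleted, 2 - the
 * front of the extent is deleted, 1 - the tail of the extent is
 * deleted, 0 - the middle is deleted and the extent must be split in two.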
4895 switch (((got.br_startoff == del->br_startoff) << 1) |
4896 (got_endoff == del_endoff)) {
4899 * Matches the whole extent. Delete the entry.
4901 xfs_iext_remove(ip, *idx, 1,
4902 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
4907 XFS_IFORK_NEXT_SET(ip, whichfork,
4908 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4909 flags |= XFS_ILOG_CORE;
4911 flags |= xfs_ilog_fext(whichfork);
4914 if ((error = xfs_btree_delete(cur, &i)))
4916 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4921 * Deleting the first part of the extent.
4923 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4924 xfs_bmbt_set_startoff(ep, del_endoff);
4925 temp = got.br_blockcount - del->br_blockcount;
4926 xfs_bmbt_set_blockcount(ep, temp);
4928 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4930 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4931 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4935 xfs_bmbt_set_startblock(ep, del_endblock);
4936 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4938 flags |= xfs_ilog_fext(whichfork);
4941 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
4942 got.br_blockcount - del->br_blockcount,
4949 * Deleting the last part of the extent.
4951 temp = got.br_blockcount - del->br_blockcount;
4952 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4953 xfs_bmbt_set_blockcount(ep, temp);
4955 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4957 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4958 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4962 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4964 flags |= xfs_ilog_fext(whichfork);
4967 if ((error = xfs_bmbt_update(cur, got.br_startoff,
4969 got.br_blockcount - del->br_blockcount,
4976 * Deleting the middle of the extent.
4978 temp = del->br_startoff - got.br_startoff;
4979 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4980 xfs_bmbt_set_blockcount(ep, temp);
4981 new.br_startoff = del_endoff;
4982 temp2 = got_endoff - del_endoff;
4983 new.br_blockcount = temp2;
4984 new.br_state = got.br_state;
4986 new.br_startblock = del_endblock;
4987 flags |= XFS_ILOG_CORE;
4989 if ((error = xfs_bmbt_update(cur,
4991 got.br_startblock, temp,
4994 if ((error = xfs_btree_increment(cur, 0, &i)))
4996 cur->bc_rec.b = new;
4997 error = xfs_btree_insert(cur, &i);
4998 if (error && error != -ENOSPC)
5001 * If we get no-space back from the btree insert,
5002 * it tried a split, and we have a zero
5003 * block reservation.
5004 * Fix up our state and return the error.
5006 if (error == -ENOSPC) {
5008 * Reset the cursor, don't trust
5009 * it after any insert operation.
5011 if ((error = xfs_bmbt_lookup_eq(cur,
5016 XFS_WANT_CORRUPTED_GOTO(mp,
5019 * Update the btree record back
5020 * to the original value.
5022 if ((error = xfs_bmbt_update(cur,
5029 * Reset the extent record back
5030 * to the original value.
5032 xfs_bmbt_set_blockcount(ep,
5038 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5040 flags |= xfs_ilog_fext(whichfork);
5041 XFS_IFORK_NEXT_SET(ip, whichfork,
5042 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5044 xfs_filblks_t stolen;
5045 ASSERT(whichfork == XFS_DATA_FORK);
5048 * Distribute the original indlen reservation across the
5049 * two new extents. Steal blocks from the deleted extent
5050 * if necessary. Stealing blocks simply fudges the
5051 * fdblocks accounting in xfs_bunmapi().
5053 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5054 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5055 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5056 del->br_blockcount);
5057 da_new = temp + temp2 - stolen;
5058 del->br_blockcount -= stolen;
5061 * Set the reservation for each extent. Warn if either
5062 * is zero as this can lead to delalloc problems.
5064 WARN_ON_ONCE(!temp || !temp2);
5065 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5066 new.br_startblock = nullstartblock((int)temp2);
5068 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5069 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5074 * If we need to, add to list of extents to delete.
5077 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
5080 * Adjust inode # blocks in the file.
5083 ip->i_d.di_nblocks -= nblks;
5085 * Adjust quota data.
5088 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5091 * Account for change in delayed indirect blocks.
5092 * Nothing to do for disk quota accounting here.
5094 ASSERT(da_old >= da_new);
5095 if (da_old > da_new)
5096 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5103 * Unmap (remove) blocks from a file.
5104 * If nexts is nonzero then the number of extents to remove is limited to
5105 * that value. If not all extents in the block range can be removed then *done is set.
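 *
 * Callers typically invoke this in a loop (truncate, for instance, keeps
 * calling xfs_bunmapi() and rolling the transaction until *done is set).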
5110 xfs_trans_t *tp, /* transaction pointer */
5111 struct xfs_inode *ip, /* incore inode */
5112 xfs_fileoff_t bno, /* starting offset to unmap */
5113 xfs_filblks_t len, /* length to unmap in file */
5114 int flags, /* misc flags */
5115 xfs_extnum_t nexts, /* number of extents max */
5116 xfs_fsblock_t *firstblock, /* first allocated block
5117 controls a.g. for allocs */
5118 xfs_bmap_free_t *flist, /* i/o: list extents to free */
5119 int *done) /* output: set when the unmap is complete */
5121 xfs_btree_cur_t *cur; /* bmap btree cursor */
5122 xfs_bmbt_irec_t del; /* extent being deleted */
5123 int eof; /* is deleting at eof */
5124 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5125 int error; /* error return value */
5126 xfs_extnum_t extno; /* extent number in list */
5127 xfs_bmbt_irec_t got; /* current extent record */
5128 xfs_ifork_t *ifp; /* inode fork pointer */
5129 int isrt; /* freeing in rt area */
5130 xfs_extnum_t lastx; /* last extent index used */
5131 int logflags; /* transaction logging flags */
5132 xfs_extlen_t mod; /* rt extent offset */
5133 xfs_mount_t *mp; /* mount structure */
5134 xfs_extnum_t nextents; /* number of file extents */
5135 xfs_bmbt_irec_t prev; /* previous extent record */
5136 xfs_fileoff_t start; /* first file offset deleted */
5137 int tmp_logflags; /* partial logging flags */
5138 int wasdel; /* was a delayed alloc extent */
5139 int whichfork; /* data or attribute fork */
5142 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5144 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5145 XFS_ATTR_FORK : XFS_DATA_FORK;
5146 ifp = XFS_IFORK_PTR(ip, whichfork);
5148 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5149 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5150 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5152 return -EFSCORRUPTED;
5155 if (XFS_FORCED_SHUTDOWN(mp))
5158 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5162 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5163 (error = xfs_iread_extents(tp, ip, whichfork)))
5165 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5166 if (nextents == 0) {
5170 XFS_STATS_INC(mp, xs_blk_unmap);
5171 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5173 bno = start + len - 1;
5174 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5178 * Check to see if the given block number is past the end of the
5179 * file, back up to the last block if so...
5182 ep = xfs_iext_get_ext(ifp, --lastx);
5183 xfs_bmbt_get_all(ep, &got);
5184 bno = got.br_startoff + got.br_blockcount - 1;
5187 if (ifp->if_flags & XFS_IFBROOT) {
5188 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5189 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5190 cur->bc_private.b.firstblock = *firstblock;
5191 cur->bc_private.b.flist = flist;
5192 cur->bc_private.b.flags = 0;
5198 * Synchronize by locking the bitmap inode.
5200 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
5201 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5205 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5206 (nexts == 0 || extno < nexts)) {
5208 * Is the found extent after a hole in which bno lives?
5209 * Just back up to the previous extent, if so.
5211 if (got.br_startoff > bno) {
5214 ep = xfs_iext_get_ext(ifp, lastx);
5215 xfs_bmbt_get_all(ep, &got);
5218 * Is the last block of this extent before the range
5219 * we're supposed to delete? If so, we're done.
5221 bno = XFS_FILEOFF_MIN(bno,
5222 got.br_startoff + got.br_blockcount - 1);
5226 * Then deal with the (possibly delayed) allocated space we found.
5231 wasdel = isnullstartblock(del.br_startblock);
5232 if (got.br_startoff < start) {
5233 del.br_startoff = start;
5234 del.br_blockcount -= start - got.br_startoff;
5236 del.br_startblock += start - got.br_startoff;
5238 if (del.br_startoff + del.br_blockcount > bno + 1)
5239 del.br_blockcount = bno + 1 - del.br_startoff;
5240 sum = del.br_startblock + del.br_blockcount;
5242 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5244 * Realtime extent not lined up at the end.
5245 * The extent could have been split into written
5246 * and unwritten pieces, or we could just be
5247 * unmapping part of it. But we can't really
5248 * get rid of part of a realtime extent.
5250 if (del.br_state == XFS_EXT_UNWRITTEN ||
5251 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5253 * This piece is unwritten, or we're not
5254 * using unwritten extents. Skip over it.
5257 bno -= mod > del.br_blockcount ?
5258 del.br_blockcount : mod;
5259 if (bno < got.br_startoff) {
5261 xfs_bmbt_get_all(xfs_iext_get_ext(
5267 * It's written, turn it unwritten.
5268 * This is better than zeroing it.
5270 ASSERT(del.br_state == XFS_EXT_NORM);
5271 ASSERT(tp->t_blk_res > 0);
5273 * If this spans a realtime extent boundary,
5274 * chop it back to the start of the one we end at.
5276 if (del.br_blockcount > mod) {
5277 del.br_startoff += del.br_blockcount - mod;
5278 del.br_startblock += del.br_blockcount - mod;
5279 del.br_blockcount = mod;
5281 del.br_state = XFS_EXT_UNWRITTEN;
5282 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5283 &lastx, &cur, &del, firstblock, flist,
5289 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5291 * Realtime extent is lined up at the end but not
5292 * at the front. We'll get rid of full extents if we can.
5295 mod = mp->m_sb.sb_rextsize - mod;
5296 if (del.br_blockcount > mod) {
5297 del.br_blockcount -= mod;
5298 del.br_startoff += mod;
5299 del.br_startblock += mod;
5300 } else if ((del.br_startoff == start &&
5301 (del.br_state == XFS_EXT_UNWRITTEN ||
5302 tp->t_blk_res == 0)) ||
5303 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5305 * Can't make it unwritten. There isn't
5306 * a full extent here so just skip it.
5308 ASSERT(bno >= del.br_blockcount);
5309 bno -= del.br_blockcount;
5310 if (got.br_startoff > bno) {
5312 ep = xfs_iext_get_ext(ifp,
5314 xfs_bmbt_get_all(ep, &got);
5318 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5320 * This one is already unwritten.
5321 * It must have a written left neighbor.
5322 * Unwrite the killed part of that one and
5326 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5328 ASSERT(prev.br_state == XFS_EXT_NORM);
5329 ASSERT(!isnullstartblock(prev.br_startblock));
5330 ASSERT(del.br_startblock ==
5331 prev.br_startblock + prev.br_blockcount);
5332 if (prev.br_startoff < start) {
5333 mod = start - prev.br_startoff;
5334 prev.br_blockcount -= mod;
5335 prev.br_startblock += mod;
5336 prev.br_startoff = start;
5338 prev.br_state = XFS_EXT_UNWRITTEN;
5340 error = xfs_bmap_add_extent_unwritten_real(tp,
5341 ip, &lastx, &cur, &prev,
5342 firstblock, flist, &logflags);
5347 ASSERT(del.br_state == XFS_EXT_NORM);
5348 del.br_state = XFS_EXT_UNWRITTEN;
5349 error = xfs_bmap_add_extent_unwritten_real(tp,
5350 ip, &lastx, &cur, &del,
5351 firstblock, flist, &logflags);
5359 * If it's the case where the directory code is running
5360 * with no block reservation, and the deleted block is in
5361 * the middle of its extent, and the resulting insert
5362 * of an extent would cause transformation to btree format,
5363 * then reject it. The calling code will then swap
5364 * blocks around instead.
5365 * We have to do this now, rather than waiting for the
5366 * conversion to btree format, since the transaction will be dirty.
5369 if (!wasdel && tp->t_blk_res == 0 &&
5370 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5371 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5372 XFS_IFORK_MAXEXT(ip, whichfork) &&
5373 del.br_startoff > got.br_startoff &&
5374 del.br_startoff + del.br_blockcount <
5375 got.br_startoff + got.br_blockcount) {
5381 * Unreserve quota and update realtime free space, if
5382 * appropriate. If delayed allocation, update the inode delalloc
5383 * counter now and wait to update the sb counters as
5384 * xfs_bmap_del_extent() might need to borrow some blocks.
5387 ASSERT(startblockval(del.br_startblock) > 0);
5389 xfs_filblks_t rtexts;
5391 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5392 do_div(rtexts, mp->m_sb.sb_rextsize);
5393 xfs_mod_frextents(mp, (int64_t)rtexts);
5394 (void)xfs_trans_reserve_quota_nblks(NULL,
5395 ip, -((long)del.br_blockcount), 0,
5396 XFS_QMOPT_RES_RTBLKS);
5398 (void)xfs_trans_reserve_quota_nblks(NULL,
5399 ip, -((long)del.br_blockcount), 0,
5400 XFS_QMOPT_RES_REGBLKS);
5402 ip->i_delayed_blks -= del.br_blockcount;
5404 cur->bc_private.b.flags |=
5405 XFS_BTCUR_BPRV_WASDEL;
5407 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5409 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5410 &tmp_logflags, whichfork);
5411 logflags |= tmp_logflags;
5415 if (!isrt && wasdel)
5416 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5418 bno = del.br_startoff - 1;
5421 * If not done go on to the next (previous) record.
5423 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5425 ep = xfs_iext_get_ext(ifp, lastx);
5426 if (xfs_bmbt_get_startoff(ep) > bno) {
5428 ep = xfs_iext_get_ext(ifp,
5431 xfs_bmbt_get_all(ep, &got);
5436 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5439 * Convert to a btree if necessary.
5441 if (xfs_bmap_needs_btree(ip, whichfork)) {
5442 ASSERT(cur == NULL);
5443 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5444 &cur, 0, &tmp_logflags, whichfork);
5445 logflags |= tmp_logflags;
5450 * transform from btree to extents, give it cur
5452 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5453 ASSERT(cur != NULL);
5454 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5456 logflags |= tmp_logflags;
5461 * transform from extents to local?
5466 * Log everything. Do this after conversion, there's no point in
5467 * logging the extent records if we've converted to btree format.
5469 if ((logflags & xfs_ilog_fext(whichfork)) &&
5470 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5471 logflags &= ~xfs_ilog_fext(whichfork);
5472 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5473 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5474 logflags &= ~xfs_ilog_fbroot(whichfork);
5476 * Log inode even in the error case; if the transaction
5477 * is dirty we'll need to shut down the filesystem.
5480 xfs_trans_log_inode(tp, ip, logflags);
5483 *firstblock = cur->bc_private.b.firstblock;
5484 cur->bc_private.b.allocated = 0;
5486 xfs_btree_del_cursor(cur,
5487 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5493 * Determine whether an extent shift can be accomplished by a merge with the
5494 * extent that precedes the target hole of the shift.
5498 struct xfs_bmbt_irec *left, /* preceding extent */
5499 struct xfs_bmbt_irec *got, /* current extent to shift */
5500 xfs_fileoff_t shift) /* shift fsb */
5502 xfs_fileoff_t startoff;
5504 startoff = got->br_startoff - shift;
5507 * The extent, once shifted, must be adjacent in-file and on-disk with
5508 * the preceding extent.
5510 if ((left->br_startoff + left->br_blockcount != startoff) ||
5511 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5512 (left->br_state != got->br_state) ||
5513 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5520 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5521 * hole in the file. If an extent shift would result in the extent being fully
5522 * adjacent to the extent that currently precedes the hole, we can merge with
5523 * the preceding extent rather than do the shift.
5525 * This function assumes the caller has verified a shift-by-merge is possible
5526 * with the provided extents via xfs_bmse_can_merge().
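 *
 * The merge extends the preceding extent by got's block count and removes
 * got from the in-core extent list and, when a cursor is supplied, from the
 * on-disk bmap btree as well.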
5530 struct xfs_inode *ip,
5532 xfs_fileoff_t shift, /* shift fsb */
5533 int current_ext, /* idx of gotp */
5534 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
5535 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
5536 struct xfs_btree_cur *cur,
5537 int *logflags) /* output */
5539 struct xfs_bmbt_irec got;
5540 struct xfs_bmbt_irec left;
5541 xfs_filblks_t blockcount;
5543 struct xfs_mount *mp = ip->i_mount;
5545 xfs_bmbt_get_all(gotp, &got);
5546 xfs_bmbt_get_all(leftp, &left);
5547 blockcount = left.br_blockcount + got.br_blockcount;
5549 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5550 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5551 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
5554 * Merge the in-core extents. Note that the host record pointers and
5555 * current_ext index are invalid once the extent has been removed via
5556 * xfs_iext_remove().
5558 xfs_bmbt_set_blockcount(leftp, blockcount);
5559 xfs_iext_remove(ip, current_ext, 1, 0);
5562 * Update the on-disk extent count, the btree if necessary and log the inode.
5565 XFS_IFORK_NEXT_SET(ip, whichfork,
5566 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5567 *logflags |= XFS_ILOG_CORE;
5569 *logflags |= XFS_ILOG_DEXT;
5573 /* lookup and remove the extent to merge */
5574 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5575 got.br_blockcount, &i);
5578 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5580 error = xfs_btree_delete(cur, &i);
5583 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5585 /* lookup and update size of the previous extent */
5586 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5587 left.br_blockcount, &i);
5590 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5592 left.br_blockcount = blockcount;
5594 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5595 left.br_blockcount, left.br_state);
5599 * Shift a single extent.
5603 struct xfs_inode *ip,
5605 xfs_fileoff_t offset_shift_fsb,
5607 struct xfs_bmbt_rec_host *gotp,
5608 struct xfs_btree_cur *cur,
5610 enum shift_direction direction)
5612 struct xfs_ifork *ifp;
5613 struct xfs_mount *mp;
5614 xfs_fileoff_t startoff;
5615 struct xfs_bmbt_rec_host *adj_irecp;
5616 struct xfs_bmbt_irec got;
5617 struct xfs_bmbt_irec adj_irec;
5623 ifp = XFS_IFORK_PTR(ip, whichfork);
5624 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5626 xfs_bmbt_get_all(gotp, &got);
5628 /* delalloc extents should be prevented by caller */
5629 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
5631 if (direction == SHIFT_LEFT) {
5632 startoff = got.br_startoff - offset_shift_fsb;
5635 * Check for merge if we've got an extent to the left,
5636 * otherwise make sure there's enough room at the start
5637 * of the file for the shift.
5639 if (!*current_ext) {
5640 if (got.br_startoff < offset_shift_fsb)
5642 goto update_current_ext;
5645 * grab the left extent and check for a large enough hole.
5648 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
5649 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5652 adj_irec.br_startoff + adj_irec.br_blockcount)
5655 /* check whether to merge the extent or shift it down */
5656 if (xfs_bmse_can_merge(&adj_irec, &got,
5657 offset_shift_fsb)) {
5658 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5659 *current_ext, gotp, adj_irecp,
5663 startoff = got.br_startoff + offset_shift_fsb;
5664 /* nothing to move if this is the last extent */
5665 if (*current_ext >= (total_extents - 1))
5666 goto update_current_ext;
5668 * If this is not the last extent in the file, make sure there
5669 * is enough room between the current extent and the next extent
5670 * to accommodate the shift.
5672 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
5673 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5674 if (startoff + got.br_blockcount > adj_irec.br_startoff)
5677 * Unlike a left shift (which involves a hole punch),
5678 * a right shift does not modify extent neighbors
5679 * in any way. We should never find mergeable extents
5680 * in this scenario. Check anyway and warn if we
5681 * encounter two extents that could be one.
5683 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
5687 * Increment the extent index for the next iteration, update the start
5688 * offset of the in-core extent and update the btree if applicable.
5691 if (direction == SHIFT_LEFT)
5695 xfs_bmbt_set_startoff(gotp, startoff);
5696 *logflags |= XFS_ILOG_CORE;
5698 *logflags |= XFS_ILOG_DEXT;
5702 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5703 got.br_blockcount, &i);
5706 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5708 got.br_startoff = startoff;
5709 return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
5710 got.br_blockcount, got.br_state);
5714 * Shift extent records to the left/right to cover/create a hole.
5716 * The maximum number of extents to be shifted in a single operation is
5717 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift, and the
5718 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
5719 * is the length by which each extent is shifted. If there is no hole to shift
5720 * the extents into, this will be considered an invalid operation and we abort immediately.
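 *
 * This is the mechanism behind the fallocate collapse-range (SHIFT_LEFT) and
 * insert-range (SHIFT_RIGHT) operations; see the callers in xfs_bmap_util.c.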
5724 xfs_bmap_shift_extents(
5725 struct xfs_trans *tp,
5726 struct xfs_inode *ip,
5727 xfs_fileoff_t *next_fsb,
5728 xfs_fileoff_t offset_shift_fsb,
5730 xfs_fileoff_t stop_fsb,
5731 xfs_fsblock_t *firstblock,
5732 struct xfs_bmap_free *flist,
5733 enum shift_direction direction,
5736 struct xfs_btree_cur *cur = NULL;
5737 struct xfs_bmbt_rec_host *gotp;
5738 struct xfs_bmbt_irec got;
5739 struct xfs_mount *mp = ip->i_mount;
5740 struct xfs_ifork *ifp;
5741 xfs_extnum_t nexts = 0;
5742 xfs_extnum_t current_ext;
5743 xfs_extnum_t total_extents;
5744 xfs_extnum_t stop_extent;
5746 int whichfork = XFS_DATA_FORK;
5749 if (unlikely(XFS_TEST_ERROR(
5750 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5751 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5752 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5753 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
5754 XFS_ERRLEVEL_LOW, mp);
5755 return -EFSCORRUPTED;
5758 if (XFS_FORCED_SHUTDOWN(mp))
5761 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5762 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5763 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
5764 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
5766 ifp = XFS_IFORK_PTR(ip, whichfork);
5767 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5768 /* Read in all the extents */
5769 error = xfs_iread_extents(tp, ip, whichfork);
5774 if (ifp->if_flags & XFS_IFBROOT) {
5775 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5776 cur->bc_private.b.firstblock = *firstblock;
5777 cur->bc_private.b.flist = flist;
5778 cur->bc_private.b.flags = 0;
5782 * There may be delalloc extents in the data fork before the range we
5783 * are collapsing out, so we cannot use the count of real extents here.
5784 * Instead we have to calculate it from the incore fork.
5786 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5787 if (total_extents == 0) {
5793 * In case of first right shift, we need to initialize next_fsb
5795 if (*next_fsb == NULLFSBLOCK) {
5796 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
5797 xfs_bmbt_get_all(gotp, &got);
5798 *next_fsb = got.br_startoff;
5799 if (stop_fsb > *next_fsb) {
5805 /* Lookup the extent index at which we have to stop */
5806 if (direction == SHIFT_RIGHT) {
5807 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
5808 /* Make stop_extent exclusive of shift range */
5811 stop_extent = total_extents;
5814 * Look up the extent index for the fsb where we start shifting. We can
5815 * henceforth iterate with current_ext as extent list changes are locked out via the ilock.
5818 * gotp can be null in 2 cases: 1) if there are no extents or 2)
5819 * *next_fsb lies in a hole beyond which there are no extents. Either way, we are done.
5822 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
5828 /* some sanity checking before we finally start shifting extents */
5829 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
5830 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
5835 while (nexts++ < num_exts) {
5836 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
5837 &current_ext, gotp, cur, &logflags,
5842 * If there was an extent merge during the shift, the extent
5843 * count can change. Update the total and grab the next record.
5845 if (direction == SHIFT_LEFT) {
5846 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5847 stop_extent = total_extents;
5850 if (current_ext == stop_extent) {
5852 *next_fsb = NULLFSBLOCK;
5855 gotp = xfs_iext_get_ext(ifp, current_ext);
5859 xfs_bmbt_get_all(gotp, &got);
5860 *next_fsb = got.br_startoff;
5865 xfs_btree_del_cursor(cur,
5866 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5869 xfs_trans_log_inode(tp, ip, logflags);
5875 * Splits an extent into two extents at split_fsb block such that it is
5876 * the first block of the current_ext. @current_ext is a target extent
5877 * to be split. @split_fsb is the block where the extent is split.
5878 * If split_fsb lies in a hole or the first block of extents, just return 0.
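 *
 * After a successful split the original record covers [br_startoff,
 * split_fsb) and a new record starting at split_fsb covers the rest of
 * the original extent, with the same extent state.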
5881 xfs_bmap_split_extent_at(
5882 struct xfs_trans *tp,
5883 struct xfs_inode *ip,
5884 xfs_fileoff_t split_fsb,
5885 xfs_fsblock_t *firstfsb,
5886 struct xfs_bmap_free *free_list)
5888 int whichfork = XFS_DATA_FORK;
5889 struct xfs_btree_cur *cur = NULL;
5890 struct xfs_bmbt_rec_host *gotp;
5891 struct xfs_bmbt_irec got;
5892 struct xfs_bmbt_irec new; /* split extent */
5893 struct xfs_mount *mp = ip->i_mount;
5894 struct xfs_ifork *ifp;
5895 xfs_fsblock_t gotblkcnt; /* new block count for got */
5896 xfs_extnum_t current_ext;
5901 if (unlikely(XFS_TEST_ERROR(
5902 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5903 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5904 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5905 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5906 XFS_ERRLEVEL_LOW, mp);
5907 return -EFSCORRUPTED;
5910 if (XFS_FORCED_SHUTDOWN(mp))
5913 ifp = XFS_IFORK_PTR(ip, whichfork);
5914 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5915 /* Read in all the extents */
5916 error = xfs_iread_extents(tp, ip, whichfork);
5922 * gotp can be null in 2 cases: 1) if there are no extents
5923 * or 2) split_fsb lies in a hole beyond which there are
5924 * no extents. Either way, we are done.
5926 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
5930 xfs_bmbt_get_all(gotp, &got);
5933 * Check if split_fsb lies in a hole or at the start boundary offset of the extent.
5936 if (got.br_startoff >= split_fsb)
5939 gotblkcnt = split_fsb - got.br_startoff;
5940 new.br_startoff = split_fsb;
5941 new.br_startblock = got.br_startblock + gotblkcnt;
5942 new.br_blockcount = got.br_blockcount - gotblkcnt;
5943 new.br_state = got.br_state;
5945 if (ifp->if_flags & XFS_IFBROOT) {
5946 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5947 cur->bc_private.b.firstblock = *firstfsb;
5948 cur->bc_private.b.flist = free_list;
5949 cur->bc_private.b.flags = 0;
5950 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5956 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5959 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
5960 got.br_blockcount = gotblkcnt;
5962 logflags = XFS_ILOG_CORE;
5964 error = xfs_bmbt_update(cur, got.br_startoff,
5971 logflags |= XFS_ILOG_DEXT;
5973 /* Add new extent */
5975 xfs_iext_insert(ip, current_ext, 1, &new, 0);
5976 XFS_IFORK_NEXT_SET(ip, whichfork,
5977 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5980 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
5981 new.br_startblock, new.br_blockcount,
5985 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5986 cur->bc_rec.b.br_state = new.br_state;
5988 error = xfs_btree_insert(cur, &i);
5991 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5995 * Convert to a btree if necessary.
5997 if (xfs_bmap_needs_btree(ip, whichfork)) {
5998 int tmp_logflags; /* partial log flag return val */
6000 ASSERT(cur == NULL);
6001 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
6002 &cur, 0, &tmp_logflags, whichfork);
6003 logflags |= tmp_logflags;
6008 cur->bc_private.b.allocated = 0;
6009 xfs_btree_del_cursor(cur,
6010 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6014 xfs_trans_log_inode(tp, ip, logflags);
6019 xfs_bmap_split_extent(
6020 struct xfs_inode *ip,
6021 xfs_fileoff_t split_fsb)
6023 struct xfs_mount *mp = ip->i_mount;
6024 struct xfs_trans *tp;
6025 struct xfs_bmap_free free_list;
6026 xfs_fsblock_t firstfsb;
6029 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
6030 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
6031 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
6033 xfs_trans_cancel(tp);
6037 xfs_ilock(ip, XFS_ILOCK_EXCL);
6038 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6040 xfs_bmap_init(&free_list, &firstfsb);
6042 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6043 &firstfsb, &free_list);
6047 error = xfs_bmap_finish(&tp, &free_list, NULL);
6051 return xfs_trans_commit(tp);
6054 xfs_bmap_cancel(&free_list);
6055 xfs_trans_cancel(tp);