1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
15 #include "xfs_inode.h"
17 #include "xfs_dir2_priv.h"
18 #include "xfs_trans.h"
20 #include "xfs_attr_leaf.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_buf_item.h"
25 #include "xfs_errortag.h"
30 * Routines to implement directories as Btrees of hashed names.
33 /*========================================================================
34 * Function prototypes for the kernel.
35 *========================================================================*/
38 * Routines used for growing the Btree.
40 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
41 xfs_da_state_blk_t *existing_root,
42 xfs_da_state_blk_t *new_child);
43 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
44 xfs_da_state_blk_t *existing_blk,
45 xfs_da_state_blk_t *split_blk,
46 xfs_da_state_blk_t *blk_to_add,
49 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
50 xfs_da_state_blk_t *node_blk_1,
51 xfs_da_state_blk_t *node_blk_2);
52 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
53 xfs_da_state_blk_t *old_node_blk,
54 xfs_da_state_blk_t *new_node_blk);
57 * Routines used for shrinking the Btree.
59 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
60 xfs_da_state_blk_t *root_blk);
61 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
62 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
63 xfs_da_state_blk_t *drop_blk);
64 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
65 xfs_da_state_blk_t *src_node_blk,
66 xfs_da_state_blk_t *dst_node_blk);
71 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
72 xfs_da_state_blk_t *drop_blk,
73 xfs_da_state_blk_t *save_blk);
76 struct kmem_cache *xfs_da_state_cache; /* anchor for dir/attr state */
79 * Allocate a dir-state structure.
80 * We don't put them on the stack since they're large.
84 struct xfs_da_args *args)
86 struct xfs_da_state *state;
88 state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
90 state->mp = args->dp->i_mount;
95 * Kill the altpath contents of a da-state structure.
98 xfs_da_state_kill_altpath(xfs_da_state_t *state)
102 for (i = 0; i < state->altpath.active; i++)
103 state->altpath.blk[i].bp = NULL;
104 state->altpath.active = 0;
108 * Free a da-state structure.
111 xfs_da_state_free(xfs_da_state_t *state)
113 xfs_da_state_kill_altpath(state);
115 memset((char *)state, 0, sizeof(*state));
117 kmem_cache_free(xfs_da_state_cache, state);
122 struct xfs_da_state *state,
123 struct xfs_da_args *args)
125 xfs_da_state_kill_altpath(state);
126 memset(state, 0, sizeof(struct xfs_da_state));
128 state->mp = state->args->dp->i_mount;
131 static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
133 if (whichfork == XFS_DATA_FORK)
134 return mp->m_dir_geo->fsbcount;
135 return mp->m_attr_geo->fsbcount;
139 xfs_da3_node_hdr_from_disk(
140 struct xfs_mount *mp,
141 struct xfs_da3_icnode_hdr *to,
142 struct xfs_da_intnode *from)
144 if (xfs_has_crc(mp)) {
145 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
147 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
148 to->back = be32_to_cpu(from3->hdr.info.hdr.back);
149 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
150 to->count = be16_to_cpu(from3->hdr.__count);
151 to->level = be16_to_cpu(from3->hdr.__level);
152 to->btree = from3->__btree;
153 ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
155 to->forw = be32_to_cpu(from->hdr.info.forw);
156 to->back = be32_to_cpu(from->hdr.info.back);
157 to->magic = be16_to_cpu(from->hdr.info.magic);
158 to->count = be16_to_cpu(from->hdr.__count);
159 to->level = be16_to_cpu(from->hdr.__level);
160 to->btree = from->__btree;
161 ASSERT(to->magic == XFS_DA_NODE_MAGIC);
166 xfs_da3_node_hdr_to_disk(
167 struct xfs_mount *mp,
168 struct xfs_da_intnode *to,
169 struct xfs_da3_icnode_hdr *from)
171 if (xfs_has_crc(mp)) {
172 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
174 ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
175 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
176 to3->hdr.info.hdr.back = cpu_to_be32(from->back);
177 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
178 to3->hdr.__count = cpu_to_be16(from->count);
179 to3->hdr.__level = cpu_to_be16(from->level);
181 ASSERT(from->magic == XFS_DA_NODE_MAGIC);
182 to->hdr.info.forw = cpu_to_be32(from->forw);
183 to->hdr.info.back = cpu_to_be32(from->back);
184 to->hdr.info.magic = cpu_to_be16(from->magic);
185 to->hdr.__count = cpu_to_be16(from->count);
186 to->hdr.__level = cpu_to_be16(from->level);
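/*
 * Illustrative sketch (not part of this file): the conversion pattern used by
 * the two helpers above, shown on a simplified two-field header.  The struct
 * and function names ("demo_*") are hypothetical; a userspace build would use
 * the endian helpers from <endian.h> rather than the kernel's be16_to_cpu().
 */
#include <stdint.h>
#include <endian.h>

struct demo_disk_hdr {		/* on-disk layout, always big-endian */
	uint16_t	magic;
	uint16_t	count;
};

struct demo_ichdr {		/* in-core copy, CPU byte order */
	unsigned int	magic;
	unsigned int	count;
};

static void demo_hdr_from_disk(struct demo_ichdr *to,
			       const struct demo_disk_hdr *from)
{
	to->magic = be16toh(from->magic);	/* decode once on read */
	to->count = be16toh(from->count);
}

static void demo_hdr_to_disk(struct demo_disk_hdr *to,
			     const struct demo_ichdr *from)
{
	to->magic = htobe16(from->magic);	/* encode once before write */
	to->count = htobe16(from->count);
}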
191 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
192 * accessible on v5 filesystems. This header format is common across da node,
193 * attr leaf and dir leaf blocks.
196 xfs_da3_blkinfo_verify(
198 struct xfs_da3_blkinfo *hdr3)
200 struct xfs_mount *mp = bp->b_mount;
201 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
203 if (!xfs_verify_magic16(bp, hdr->magic))
204 return __this_address;
206 if (xfs_has_crc(mp)) {
207 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
208 return __this_address;
209 if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
210 return __this_address;
211 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
212 return __this_address;
218 static xfs_failaddr_t
222 struct xfs_mount *mp = bp->b_mount;
223 struct xfs_da_intnode *hdr = bp->b_addr;
224 struct xfs_da3_icnode_hdr ichdr;
227 xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
229 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
233 if (ichdr.level == 0)
234 return __this_address;
235 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
236 return __this_address;
237 if (ichdr.count == 0)
238 return __this_address;
241	 * we don't know if the node is for an attribute or directory tree,
242	 * so only fail if the count is outside both bounds
244 if (ichdr.count > mp->m_dir_geo->node_ents &&
245 ichdr.count > mp->m_attr_geo->node_ents)
246 return __this_address;
248 /* XXX: hash order check? */
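/*
 * Illustrative sketch (not part of this file): the verifier style used above,
 * i.e. return an identifier for the first failed sanity check and NULL when
 * the header looks plausible.  The kernel uses xfs_failaddr_t/__this_address;
 * this hypothetical userspace version just returns a description string, and
 * the depth/entry limits are made-up constants.
 */
#include <stddef.h>

#define DEMO_MAXDEPTH	5
#define DEMO_MAX_ENTS	64

struct demo_node_hdr {
	unsigned int	level;		/* tree level, 1..DEMO_MAXDEPTH */
	unsigned int	count;		/* entries used in this node */
};

static const char *demo_node_verify(const struct demo_node_hdr *hdr)
{
	if (hdr->level == 0)
		return "level is zero";		/* leaves never use this header */
	if (hdr->level > DEMO_MAXDEPTH)
		return "level too deep";
	if (hdr->count == 0)
		return "empty node";
	if (hdr->count > DEMO_MAX_ENTS)
		return "count beyond geometry";
	return NULL;				/* passes the structural checks */
}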
254 xfs_da3_node_write_verify(
257 struct xfs_mount *mp = bp->b_mount;
258 struct xfs_buf_log_item *bip = bp->b_log_item;
259 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
262 fa = xfs_da3_node_verify(bp);
264 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
268 if (!xfs_has_crc(mp))
272 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
274 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
278	 * Leaf/node format detection on trees is sketchy, so a node read may be
279	 * attempted on leaf-level blocks when detection incorrectly identifies the
280	 * tree as node format. In this case, we need to swap the verifier to match
281	 * the correct format of the block being read.
284 xfs_da3_node_read_verify(
287 struct xfs_da_blkinfo *info = bp->b_addr;
290 switch (be16_to_cpu(info->magic)) {
291 case XFS_DA3_NODE_MAGIC:
292 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
293 xfs_verifier_error(bp, -EFSBADCRC,
298 case XFS_DA_NODE_MAGIC:
299 fa = xfs_da3_node_verify(bp);
301 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
303 case XFS_ATTR_LEAF_MAGIC:
304 case XFS_ATTR3_LEAF_MAGIC:
305 bp->b_ops = &xfs_attr3_leaf_buf_ops;
306 bp->b_ops->verify_read(bp);
308 case XFS_DIR2_LEAFN_MAGIC:
309 case XFS_DIR3_LEAFN_MAGIC:
310 bp->b_ops = &xfs_dir3_leafn_buf_ops;
311 bp->b_ops->verify_read(bp);
314 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
319 /* Verify the structure of a da3 block. */
320 static xfs_failaddr_t
321 xfs_da3_node_verify_struct(
324 struct xfs_da_blkinfo *info = bp->b_addr;
326 switch (be16_to_cpu(info->magic)) {
327 case XFS_DA3_NODE_MAGIC:
328 case XFS_DA_NODE_MAGIC:
329 return xfs_da3_node_verify(bp);
330 case XFS_ATTR_LEAF_MAGIC:
331 case XFS_ATTR3_LEAF_MAGIC:
332 bp->b_ops = &xfs_attr3_leaf_buf_ops;
333 return bp->b_ops->verify_struct(bp);
334 case XFS_DIR2_LEAFN_MAGIC:
335 case XFS_DIR3_LEAFN_MAGIC:
336 bp->b_ops = &xfs_dir3_leafn_buf_ops;
337 return bp->b_ops->verify_struct(bp);
339 return __this_address;
343 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
344 .name = "xfs_da3_node",
345 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
346 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
347 .verify_read = xfs_da3_node_read_verify,
348 .verify_write = xfs_da3_node_write_verify,
349 .verify_struct = xfs_da3_node_verify_struct,
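/*
 * Illustrative sketch (not part of this file): the function-pointer "ops"
 * pattern used by the xfs_buf_ops table above, reduced to a hypothetical
 * demo_buf/demo_buf_ops pair.  Swapping bp->b_ops, as the read verifier above
 * does when it finds a leaf block, is just re-pointing this table.
 */
#include <stdio.h>

struct demo_buf;

struct demo_buf_ops {
	const char	*name;
	void		(*verify_read)(struct demo_buf *bp);
	void		(*verify_write)(struct demo_buf *bp);
};

struct demo_buf {
	const struct demo_buf_ops	*ops;
	void				*addr;
};

static void demo_node_read_verify(struct demo_buf *bp)
{
	printf("%s: read verify\n", bp->ops->name);
}

static void demo_node_write_verify(struct demo_buf *bp)
{
	printf("%s: write verify\n", bp->ops->name);
}

static const struct demo_buf_ops demo_node_ops = {
	.name		= "demo_node",
	.verify_read	= demo_node_read_verify,
	.verify_write	= demo_node_write_verify,
};

/* usage: bp->ops = &demo_node_ops; bp->ops->verify_read(bp); */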
353 xfs_da3_node_set_type(
354 struct xfs_trans *tp,
357 struct xfs_da_blkinfo *info = bp->b_addr;
359 switch (be16_to_cpu(info->magic)) {
360 case XFS_DA_NODE_MAGIC:
361 case XFS_DA3_NODE_MAGIC:
362 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
364 case XFS_ATTR_LEAF_MAGIC:
365 case XFS_ATTR3_LEAF_MAGIC:
366 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
368 case XFS_DIR2_LEAFN_MAGIC:
369 case XFS_DIR3_LEAFN_MAGIC:
370 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
373 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
374 info, sizeof(*info));
375 xfs_trans_brelse(tp, bp);
376 return -EFSCORRUPTED;
382 struct xfs_trans *tp,
383 struct xfs_inode *dp,
385 struct xfs_buf **bpp,
390 error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
391 &xfs_da3_node_buf_ops);
392 if (error || !*bpp || !tp)
394 return xfs_da3_node_set_type(tp, *bpp);
398 xfs_da3_node_read_mapped(
399 struct xfs_trans *tp,
400 struct xfs_inode *dp,
401 xfs_daddr_t mappedbno,
402 struct xfs_buf **bpp,
405 struct xfs_mount *mp = dp->i_mount;
408 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
409 XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
410 bpp, &xfs_da3_node_buf_ops);
414 if (whichfork == XFS_ATTR_FORK)
415 xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
417 xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
421 return xfs_da3_node_set_type(tp, *bpp);
424 /*========================================================================
425 * Routines used for growing the Btree.
426 *========================================================================*/
429 * Create the initial contents of an intermediate node.
433 struct xfs_da_args *args,
436 struct xfs_buf **bpp,
439 struct xfs_da_intnode *node;
440 struct xfs_trans *tp = args->trans;
441 struct xfs_mount *mp = tp->t_mountp;
442 struct xfs_da3_icnode_hdr ichdr = {0};
445 struct xfs_inode *dp = args->dp;
447 trace_xfs_da_node_create(args);
448 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
450 error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
453 bp->b_ops = &xfs_da3_node_buf_ops;
454 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
457 if (xfs_has_crc(mp)) {
458 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
460 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
461 ichdr.magic = XFS_DA3_NODE_MAGIC;
462 hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
463 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
464 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
466 ichdr.magic = XFS_DA_NODE_MAGIC;
470 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
471 xfs_trans_log_buf(tp, bp,
472 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
479 * Split a leaf node, rebalance, then possibly split
480 * intermediate nodes, rebalance, etc.
484 struct xfs_da_state *state)
486 struct xfs_da_state_blk *oldblk;
487 struct xfs_da_state_blk *newblk;
488 struct xfs_da_state_blk *addblk;
489 struct xfs_da_intnode *node;
495 trace_xfs_da_split(state->args);
497 if (XFS_TEST_ERROR(false, state->mp, XFS_ERRTAG_DA_LEAF_SPLIT))
501 * Walk back up the tree splitting/inserting/adjusting as necessary.
502 * If we need to insert and there isn't room, split the node, then
503 * decide which fragment to insert the new block from below into.
504 * Note that we may split the root this way, but we need more fixup.
506 max = state->path.active - 1;
507 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
508 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
509 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
511 addblk = &state->path.blk[max]; /* initial dummy value */
512 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
513 oldblk = &state->path.blk[i];
514 newblk = &state->altpath.blk[i];
517 * If a leaf node then
518 * Allocate a new leaf node, then rebalance across them.
519 * else if an intermediate node then
520	 *   We split the layer below us; must we split this node too?
522 switch (oldblk->magic) {
523 case XFS_ATTR_LEAF_MAGIC:
524 error = xfs_attr3_leaf_split(state, oldblk, newblk);
525 if ((error != 0) && (error != -ENOSPC)) {
526 return error; /* GROT: attr is inconsistent */
533 * Entry wouldn't fit, split the leaf again. The new
534 * extrablk will be consumed by xfs_da3_node_split if
537 state->extravalid = 1;
539 state->extraafter = 0; /* before newblk */
540 trace_xfs_attr_leaf_split_before(state->args);
541 error = xfs_attr3_leaf_split(state, oldblk,
544 state->extraafter = 1; /* after newblk */
545 trace_xfs_attr_leaf_split_after(state->args);
546 error = xfs_attr3_leaf_split(state, newblk,
550 return error; /* GROT: attr inconsistent */
553 case XFS_DIR2_LEAFN_MAGIC:
554 error = xfs_dir2_leafn_split(state, oldblk, newblk);
559 case XFS_DA_NODE_MAGIC:
560 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
564 return error; /* GROT: dir is inconsistent */
566 * Record the newly split block for the next time thru?
576 * Update the btree to show the new hashval for this child.
578 xfs_da3_fixhashpath(state, &state->path);
584 * xfs_da3_node_split() should have consumed any extra blocks we added
585 * during a double leaf split in the attr fork. This is guaranteed as
586 * we can't be here if the attr fork only has a single leaf block.
588 ASSERT(state->extravalid == 0 ||
589 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
592 * Split the root node.
594 ASSERT(state->path.active == 0);
595 oldblk = &state->path.blk[0];
596 error = xfs_da3_root_split(state, oldblk, addblk);
601 * Update pointers to the node which used to be block 0 and just got
602 * bumped because of the addition of a new root node. Note that the
603 * original block 0 could be at any position in the list of blocks in
606 * Note: the magic numbers and sibling pointers are in the same physical
607 * place for both v2 and v3 headers (by design). Hence it doesn't matter
608 * which version of the xfs_da_intnode structure we use here as the
609 * result will be the same using either structure.
611 node = oldblk->bp->b_addr;
612 if (node->hdr.info.forw) {
613 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
614 xfs_buf_mark_corrupt(oldblk->bp);
615 error = -EFSCORRUPTED;
618 node = addblk->bp->b_addr;
619 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
620 xfs_trans_log_buf(state->args->trans, addblk->bp,
621 XFS_DA_LOGRANGE(node, &node->hdr.info,
622 sizeof(node->hdr.info)));
624 node = oldblk->bp->b_addr;
625 if (node->hdr.info.back) {
626 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
627 xfs_buf_mark_corrupt(oldblk->bp);
628 error = -EFSCORRUPTED;
631 node = addblk->bp->b_addr;
632 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
633 xfs_trans_log_buf(state->args->trans, addblk->bp,
634 XFS_DA_LOGRANGE(node, &node->hdr.info,
635 sizeof(node->hdr.info)));
643 * Split the root. We have to create a new root and point to the two
644 * parts (the split old root) that we just created. Copy block zero to
645	 * the EOF, extending the inode in the process.
647 STATIC int /* error */
649 struct xfs_da_state *state,
650 struct xfs_da_state_blk *blk1,
651 struct xfs_da_state_blk *blk2)
653 struct xfs_da_intnode *node;
654 struct xfs_da_intnode *oldroot;
655 struct xfs_da_node_entry *btree;
656 struct xfs_da3_icnode_hdr nodehdr;
657 struct xfs_da_args *args;
659 struct xfs_inode *dp;
660 struct xfs_trans *tp;
661 struct xfs_dir2_leaf *leaf;
667 trace_xfs_da_root_split(state->args);
670 * Copy the existing (incorrect) block from the root node position
671 * to a free space somewhere.
674 error = xfs_da_grow_inode(args, &blkno);
680 error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
684 oldroot = blk1->bp->b_addr;
685 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
686 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
687 struct xfs_da3_icnode_hdr icnodehdr;
689 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
690 btree = icnodehdr.btree;
691 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
692 level = icnodehdr.level;
695 * we are about to copy oldroot to bp, so set up the type
696 * of bp while we know exactly what it will be.
698 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
700 struct xfs_dir3_icleaf_hdr leafhdr;
702 leaf = (xfs_dir2_leaf_t *)oldroot;
703 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
705 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
706 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
707 size = (int)((char *)&leafhdr.ents[leafhdr.count] -
712 * we are about to copy oldroot to bp, so set up the type
713 * of bp while we know exactly what it will be.
715 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
719 * we can copy most of the information in the node from one block to
720 * another, but for CRC enabled headers we have to make sure that the
721 * block specific identifiers are kept intact. We update the buffer
724 memcpy(node, oldroot, size);
725 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
726 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
727 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
729 node3->hdr.info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
731 xfs_trans_log_buf(tp, bp, 0, size - 1);
733 bp->b_ops = blk1->bp->b_ops;
734 xfs_trans_buf_copy_type(bp, blk1->bp);
739 * Set up the new root node.
741 error = xfs_da3_node_create(args,
742 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
743 level + 1, &bp, args->whichfork);
748 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
749 btree = nodehdr.btree;
750 btree[0].hashval = cpu_to_be32(blk1->hashval);
751 btree[0].before = cpu_to_be32(blk1->blkno);
752 btree[1].hashval = cpu_to_be32(blk2->hashval);
753 btree[1].before = cpu_to_be32(blk2->blkno);
755 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
758 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
759 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
760 ASSERT(blk1->blkno >= args->geo->leafblk &&
761 blk1->blkno < args->geo->freeblk);
762 ASSERT(blk2->blkno >= args->geo->leafblk &&
763 blk2->blkno < args->geo->freeblk);
767 /* Header is already logged by xfs_da_node_create */
768 xfs_trans_log_buf(tp, bp,
769 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
775 * Split the node, rebalance, then add the new entry.
777 STATIC int /* error */
779 struct xfs_da_state *state,
780 struct xfs_da_state_blk *oldblk,
781 struct xfs_da_state_blk *newblk,
782 struct xfs_da_state_blk *addblk,
786 struct xfs_da_intnode *node;
787 struct xfs_da3_icnode_hdr nodehdr;
792 struct xfs_inode *dp = state->args->dp;
794 trace_xfs_da_node_split(state->args);
796 node = oldblk->bp->b_addr;
797 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
800 * With V2 dirs the extra block is data or freespace.
802 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
803 newcount = 1 + useextra;
805 * Do we have to split the node?
807 if (nodehdr.count + newcount > state->args->geo->node_ents) {
809 * Allocate a new node, add to the doubly linked chain of
810 * nodes, then move some of our excess entries into it.
812 error = xfs_da_grow_inode(state->args, &blkno);
814 return error; /* GROT: dir is inconsistent */
816 error = xfs_da3_node_create(state->args, blkno, treelevel,
817 &newblk->bp, state->args->whichfork);
819 return error; /* GROT: dir is inconsistent */
820 newblk->blkno = blkno;
821 newblk->magic = XFS_DA_NODE_MAGIC;
822 xfs_da3_node_rebalance(state, oldblk, newblk);
823 error = xfs_da3_blk_link(state, oldblk, newblk);
832 * Insert the new entry(s) into the correct block
833 * (updating last hashval in the process).
835 * xfs_da3_node_add() inserts BEFORE the given index,
836 * and as a result of using node_lookup_int() we always
837 * point to a valid entry (not after one), but a split
838 * operation always results in a new block whose hashvals
839 * FOLLOW the current block.
841 * If we had double-split op below us, then add the extra block too.
843 node = oldblk->bp->b_addr;
844 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
845 if (oldblk->index <= nodehdr.count) {
847 xfs_da3_node_add(state, oldblk, addblk);
849 if (state->extraafter)
851 xfs_da3_node_add(state, oldblk, &state->extrablk);
852 state->extravalid = 0;
856 xfs_da3_node_add(state, newblk, addblk);
858 if (state->extraafter)
860 xfs_da3_node_add(state, newblk, &state->extrablk);
861 state->extravalid = 0;
869 * Balance the btree elements between two intermediate nodes,
870 * usually one full and one empty.
872 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
875 xfs_da3_node_rebalance(
876 struct xfs_da_state *state,
877 struct xfs_da_state_blk *blk1,
878 struct xfs_da_state_blk *blk2)
880 struct xfs_da_intnode *node1;
881 struct xfs_da_intnode *node2;
882 struct xfs_da_node_entry *btree1;
883 struct xfs_da_node_entry *btree2;
884 struct xfs_da_node_entry *btree_s;
885 struct xfs_da_node_entry *btree_d;
886 struct xfs_da3_icnode_hdr nodehdr1;
887 struct xfs_da3_icnode_hdr nodehdr2;
888 struct xfs_trans *tp;
892 struct xfs_inode *dp = state->args->dp;
894 trace_xfs_da_node_rebalance(state->args);
896 node1 = blk1->bp->b_addr;
897 node2 = blk2->bp->b_addr;
898 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
899 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
900 btree1 = nodehdr1.btree;
901 btree2 = nodehdr2.btree;
904 * Figure out how many entries need to move, and in which direction.
905 * Swap the nodes around if that makes it simpler.
907 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
908 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
909 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
910 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
912 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
913 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
914 btree1 = nodehdr1.btree;
915 btree2 = nodehdr2.btree;
919 count = (nodehdr1.count - nodehdr2.count) / 2;
922 tp = state->args->trans;
924 * Two cases: high-to-low and low-to-high.
928 * Move elements in node2 up to make a hole.
930 tmp = nodehdr2.count;
932 tmp *= (uint)sizeof(xfs_da_node_entry_t);
933 btree_s = &btree2[0];
934 btree_d = &btree2[count];
935 memmove(btree_d, btree_s, tmp);
939 * Move the req'd B-tree elements from high in node1 to
942 nodehdr2.count += count;
943 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
944 btree_s = &btree1[nodehdr1.count - count];
945 btree_d = &btree2[0];
946 memcpy(btree_d, btree_s, tmp);
947 nodehdr1.count -= count;
950 * Move the req'd B-tree elements from low in node2 to
954 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
955 btree_s = &btree2[0];
956 btree_d = &btree1[nodehdr1.count];
957 memcpy(btree_d, btree_s, tmp);
958 nodehdr1.count += count;
960 xfs_trans_log_buf(tp, blk1->bp,
961 XFS_DA_LOGRANGE(node1, btree_d, tmp));
964 * Move elements in node2 down to fill the hole.
966 tmp = nodehdr2.count - count;
967 tmp *= (uint)sizeof(xfs_da_node_entry_t);
968 btree_s = &btree2[count];
969 btree_d = &btree2[0];
970 memmove(btree_d, btree_s, tmp);
971 nodehdr2.count -= count;
975 * Log header of node 1 and all current bits of node 2.
977 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
978 xfs_trans_log_buf(tp, blk1->bp,
979 XFS_DA_LOGRANGE(node1, &node1->hdr,
980 state->args->geo->node_hdr_size));
982 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
983 xfs_trans_log_buf(tp, blk2->bp,
984 XFS_DA_LOGRANGE(node2, &node2->hdr,
985 state->args->geo->node_hdr_size +
986 (sizeof(btree2[0]) * nodehdr2.count)));
989 * Record the last hashval from each block for upward propagation.
990 * (note: don't use the swapped node pointers)
993 node1 = blk1->bp->b_addr;
994 node2 = blk2->bp->b_addr;
995 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
996 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
997 btree1 = nodehdr1.btree;
998 btree2 = nodehdr2.btree;
1000 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
1001 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
1004 * Adjust the expected index for insertion.
1006 if (blk1->index >= nodehdr1.count) {
1007 blk2->index = blk1->index - nodehdr1.count;
1008 blk1->index = nodehdr1.count + 1; /* make it invalid */
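/*
 * Illustrative sketch (not part of this file): the entry shuffling above on
 * plain arrays.  Moving half the surplus from the fuller node to the emptier
 * one keeps both roughly balanced; when entries land at the front of the
 * destination, a hole is opened with memmove() first.  Names ("demo_*") are
 * hypothetical, and only the high-to-low direction shown above is sketched.
 */
#include <string.h>

struct demo_rb_ent {
	unsigned int	hashval;
	unsigned int	before;
};

static void demo_rebalance_hi_to_lo(struct demo_rb_ent *b1, unsigned int *n1,
				    struct demo_rb_ent *b2, unsigned int *n2)
{
	int count = ((int)*n1 - (int)*n2) / 2;	/* surplus to move */

	if (count <= 0)
		return;
	/* open a hole at the front of the destination */
	memmove(&b2[count], &b2[0], *n2 * sizeof(*b2));
	/* copy the highest-hashval entries of b1 into the hole */
	memcpy(&b2[0], &b1[*n1 - count], count * sizeof(*b1));
	*n1 -= count;
	*n2 += count;
}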
1013 * Add a new entry to an intermediate node.
1017 struct xfs_da_state *state,
1018 struct xfs_da_state_blk *oldblk,
1019 struct xfs_da_state_blk *newblk)
1021 struct xfs_da_intnode *node;
1022 struct xfs_da3_icnode_hdr nodehdr;
1023 struct xfs_da_node_entry *btree;
1025 struct xfs_inode *dp = state->args->dp;
1027 trace_xfs_da_node_add(state->args);
1029 node = oldblk->bp->b_addr;
1030 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1031 btree = nodehdr.btree;
1033 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
1034 ASSERT(newblk->blkno != 0);
1035 if (state->args->whichfork == XFS_DATA_FORK)
1036 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
1037 newblk->blkno < state->args->geo->freeblk);
1040 * We may need to make some room before we insert the new node.
1043 if (oldblk->index < nodehdr.count) {
1044 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1045 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1047 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1048 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1049 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1050 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1051 tmp + sizeof(*btree)));
1054 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1055 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1056 XFS_DA_LOGRANGE(node, &node->hdr,
1057 state->args->geo->node_hdr_size));
1060 * Copy the last hash value from the oldblk to propagate upwards.
1062 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
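/*
 * Illustrative sketch (not part of this file): the insert-at-index step above
 * on a plain array.  Entries at and beyond the index slide up one slot, then
 * the new (hashval, before) pair is written into the gap.  The caller must
 * guarantee there is room for one more entry.  Names ("demo_*") are
 * hypothetical.
 */
#include <string.h>

struct demo_add_ent {
	unsigned int	hashval;
	unsigned int	before;
};

static void demo_node_insert(struct demo_add_ent *btree, unsigned int *count,
			     unsigned int index, struct demo_add_ent newent)
{
	if (index < *count)		/* open a one-entry hole */
		memmove(&btree[index + 1], &btree[index],
			(*count - index) * sizeof(*btree));
	btree[index] = newent;
	(*count)++;
}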
1065 /*========================================================================
1066 * Routines used for shrinking the Btree.
1067 *========================================================================*/
1070 * Deallocate an empty leaf node, remove it from its parent,
1071 * possibly deallocating that block, etc...
1075 struct xfs_da_state *state)
1077 struct xfs_da_state_blk *drop_blk;
1078 struct xfs_da_state_blk *save_blk;
1082 trace_xfs_da_join(state->args);
1084 drop_blk = &state->path.blk[ state->path.active-1 ];
1085 save_blk = &state->altpath.blk[ state->path.active-1 ];
1086 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
1087 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
1088 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1091 * Walk back up the tree joining/deallocating as necessary.
1092 * When we stop dropping blocks, break out.
1094 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1095 state->path.active--) {
1097 * See if we can combine the block with a neighbor.
1098 * (action == 0) => no options, just leave
1099 * (action == 1) => coalesce, then unlink
1100 * (action == 2) => block empty, unlink it
1102 switch (drop_blk->magic) {
1103 case XFS_ATTR_LEAF_MAGIC:
1104 error = xfs_attr3_leaf_toosmall(state, &action);
1109 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1111 case XFS_DIR2_LEAFN_MAGIC:
1112 error = xfs_dir2_leafn_toosmall(state, &action);
1117 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1119 case XFS_DA_NODE_MAGIC:
1121 * Remove the offending node, fixup hashvals,
1122 * check for a toosmall neighbor.
1124 xfs_da3_node_remove(state, drop_blk);
1125 xfs_da3_fixhashpath(state, &state->path);
1126 error = xfs_da3_node_toosmall(state, &action);
1131 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1134 xfs_da3_fixhashpath(state, &state->altpath);
1135 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1136 xfs_da_state_kill_altpath(state);
1139 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1141 drop_blk->bp = NULL;
1146 * We joined all the way to the top. If it turns out that
1147 * we only have one entry in the root, make the child block
1150 xfs_da3_node_remove(state, drop_blk);
1151 xfs_da3_fixhashpath(state, &state->path);
1152 error = xfs_da3_root_join(state, &state->path.blk[0]);
1158 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1160 __be16 magic = blkinfo->magic;
1163 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1164 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1165 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1166 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1168 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1169 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1171 ASSERT(!blkinfo->forw);
1172 ASSERT(!blkinfo->back);
1175 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1179 * We have only one entry in the root. Copy the only remaining child of
1180 * the old root to block 0 as the new root node.
1184 struct xfs_da_state *state,
1185 struct xfs_da_state_blk *root_blk)
1187 struct xfs_da_intnode *oldroot;
1188 struct xfs_da_args *args;
1191 struct xfs_da3_icnode_hdr oldroothdr;
1193 struct xfs_inode *dp = state->args->dp;
1195 trace_xfs_da_root_join(state->args);
1197 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1200 oldroot = root_blk->bp->b_addr;
1201 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
1202 ASSERT(oldroothdr.forw == 0);
1203 ASSERT(oldroothdr.back == 0);
1206 * If the root has more than one child, then don't do anything.
1208 if (oldroothdr.count > 1)
1212 * Read in the (only) child block, then copy those bytes into
1213 * the root block's buffer and free the original child block.
1215 child = be32_to_cpu(oldroothdr.btree[0].before);
1217 error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
1220 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1223 * This could be copying a leaf back into the root block in the case of
1224 * there only being a single leaf block left in the tree. Hence we have
1225 * to update the b_ops pointer as well to match the buffer type change
1226 * that could occur. For dir3 blocks we also need to update the block
1227 * number in the buffer header.
1229 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1230 root_blk->bp->b_ops = bp->b_ops;
1231 xfs_trans_buf_copy_type(root_blk->bp, bp);
1232 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1233 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1234 da3->blkno = cpu_to_be64(xfs_buf_daddr(root_blk->bp));
1236 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1237 args->geo->blksize - 1);
1238 error = xfs_da_shrink_inode(args, child, bp);
1243 * Check a node block and its neighbors to see if the block should be
1244 * collapsed into one or the other neighbor. Always keep the block
1245 * with the smaller block number.
1246 * If the current block is over 50% full, don't try to join it, return 0.
1247 * If the block is empty, fill in the state structure and return 2.
1248 * If it can be collapsed, fill in the state structure and return 1.
1249 * If nothing can be done, return 0.
1252 xfs_da3_node_toosmall(
1253 struct xfs_da_state *state,
1256 struct xfs_da_intnode *node;
1257 struct xfs_da_state_blk *blk;
1258 struct xfs_da_blkinfo *info;
1261 struct xfs_da3_icnode_hdr nodehdr;
1267 struct xfs_inode *dp = state->args->dp;
1269 trace_xfs_da_node_toosmall(state->args);
1272 * Check for the degenerate case of the block being over 50% full.
1273 * If so, it's not worth even looking to see if we might be able
1274 * to coalesce with a sibling.
1276 blk = &state->path.blk[ state->path.active-1 ];
1277 info = blk->bp->b_addr;
1278 node = (xfs_da_intnode_t *)info;
1279 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1280 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1281 *action = 0; /* blk over 50%, don't try to join */
1282 return 0; /* blk over 50%, don't try to join */
1286 * Check for the degenerate case of the block being empty.
1287 * If the block is empty, we'll simply delete it, no need to
1288 * coalesce it with a sibling block. We choose (arbitrarily)
1289 * to merge with the forward block unless it is NULL.
1291 if (nodehdr.count == 0) {
1293 * Make altpath point to the block we want to keep and
1294 * path point to the block we want to drop (this one).
1296 forward = (info->forw != 0);
1297 memcpy(&state->altpath, &state->path, sizeof(state->path));
1298 error = xfs_da3_path_shift(state, &state->altpath, forward,
1311 * Examine each sibling block to see if we can coalesce with
1312 * at least 25% free space to spare. We need to figure out
1313 * whether to merge with the forward or the backward block.
1314 * We prefer coalescing with the lower numbered sibling so as
1315 * to shrink a directory over time.
1317 count = state->args->geo->node_ents;
1318 count -= state->args->geo->node_ents >> 2;
1319 count -= nodehdr.count;
1321 /* start with smaller blk num */
1322 forward = nodehdr.forw < nodehdr.back;
1323 for (i = 0; i < 2; forward = !forward, i++) {
1324 struct xfs_da3_icnode_hdr thdr;
1326 blkno = nodehdr.forw;
1328 blkno = nodehdr.back;
1331 error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
1332 state->args->whichfork);
1337 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
1338 xfs_trans_brelse(state->args->trans, bp);
1340 if (count - thdr.count >= 0)
1341 break; /* fits with at least 25% to spare */
1349 * Make altpath point to the block we want to keep (the lower
1350 * numbered block) and path point to the block we want to drop.
1352 memcpy(&state->altpath, &state->path, sizeof(state->path));
1353 if (blkno < blk->blkno) {
1354 error = xfs_da3_path_shift(state, &state->altpath, forward,
1357 error = xfs_da3_path_shift(state, &state->path, forward,
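/*
 * Illustrative sketch (not part of this file): the "25% to spare" test used
 * in the sibling scan above, written as a small predicate.  A merge is only
 * worthwhile if our entries plus the sibling's entries still leave a quarter
 * of the node free, i.e. sibling count <= node_ents - node_ents/4 - our count.
 * Names are hypothetical.
 */
static int demo_can_coalesce(unsigned int node_ents,	/* geometry limit */
			     unsigned int our_count,
			     unsigned int sib_count)
{
	unsigned int room = node_ents - (node_ents >> 2);	/* 75% of capacity */

	return our_count + sib_count <= room;
}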
1371 * Pick up the last hashvalue from an intermediate node.
1374 xfs_da3_node_lasthash(
1375 struct xfs_inode *dp,
1379 struct xfs_da3_icnode_hdr nodehdr;
1381 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
1383 *count = nodehdr.count;
1386 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1390 * Walk back up the tree adjusting hash values as necessary,
1391 * when we stop making changes, return.
1394 xfs_da3_fixhashpath(
1395 struct xfs_da_state *state,
1396 struct xfs_da_state_path *path)
1398 struct xfs_da_state_blk *blk;
1399 struct xfs_da_intnode *node;
1400 struct xfs_da_node_entry *btree;
1401 xfs_dahash_t lasthash=0;
1404 struct xfs_inode *dp = state->args->dp;
1406 trace_xfs_da_fixhashpath(state->args);
1408 level = path->active-1;
1409 blk = &path->blk[ level ];
1410 switch (blk->magic) {
1411 case XFS_ATTR_LEAF_MAGIC:
1412 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1416 case XFS_DIR2_LEAFN_MAGIC:
1417 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1421 case XFS_DA_NODE_MAGIC:
1422 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1427 for (blk--, level--; level >= 0; blk--, level--) {
1428 struct xfs_da3_icnode_hdr nodehdr;
1430 node = blk->bp->b_addr;
1431 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1432 btree = nodehdr.btree;
1433 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1435 blk->hashval = lasthash;
1436 btree[blk->index].hashval = cpu_to_be32(lasthash);
1437 xfs_trans_log_buf(state->args->trans, blk->bp,
1438 XFS_DA_LOGRANGE(node, &btree[blk->index],
1441 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1446 * Remove an entry from an intermediate node.
1449 xfs_da3_node_remove(
1450 struct xfs_da_state *state,
1451 struct xfs_da_state_blk *drop_blk)
1453 struct xfs_da_intnode *node;
1454 struct xfs_da3_icnode_hdr nodehdr;
1455 struct xfs_da_node_entry *btree;
1458 struct xfs_inode *dp = state->args->dp;
1460 trace_xfs_da_node_remove(state->args);
1462 node = drop_blk->bp->b_addr;
1463 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1464 ASSERT(drop_blk->index < nodehdr.count);
1465 ASSERT(drop_blk->index >= 0);
1468 * Copy over the offending entry, or just zero it out.
1470 index = drop_blk->index;
1471 btree = nodehdr.btree;
1472 if (index < nodehdr.count - 1) {
1473 tmp = nodehdr.count - index - 1;
1474 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1475 memmove(&btree[index], &btree[index + 1], tmp);
1476 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1477 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1478 index = nodehdr.count - 1;
1480 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1481 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1482 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1484 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1485 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1486 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
1489 * Copy the last hash value from the block to propagate upwards.
1491 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
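/*
 * Illustrative sketch (not part of this file): the remove-at-index step above
 * on a plain array.  Entries beyond the index slide down one slot and the now
 * unused last slot is cleared, mirroring the memmove()/memset() pair above.
 * The caller must guarantee index < *count.  Names ("demo_*") are hypothetical.
 */
#include <string.h>

struct demo_del_ent {
	unsigned int	hashval;
	unsigned int	before;
};

static void demo_node_delete(struct demo_del_ent *btree, unsigned int *count,
			     unsigned int index)
{
	if (index < *count - 1)		/* close the gap */
		memmove(&btree[index], &btree[index + 1],
			(*count - index - 1) * sizeof(*btree));
	memset(&btree[*count - 1], 0, sizeof(*btree));	/* scrub the freed slot */
	(*count)--;
}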
1495 * Unbalance the elements between two intermediate nodes,
1496 * move all Btree elements from one node into another.
1499 xfs_da3_node_unbalance(
1500 struct xfs_da_state *state,
1501 struct xfs_da_state_blk *drop_blk,
1502 struct xfs_da_state_blk *save_blk)
1504 struct xfs_da_intnode *drop_node;
1505 struct xfs_da_intnode *save_node;
1506 struct xfs_da_node_entry *drop_btree;
1507 struct xfs_da_node_entry *save_btree;
1508 struct xfs_da3_icnode_hdr drop_hdr;
1509 struct xfs_da3_icnode_hdr save_hdr;
1510 struct xfs_trans *tp;
1513 struct xfs_inode *dp = state->args->dp;
1515 trace_xfs_da_node_unbalance(state->args);
1517 drop_node = drop_blk->bp->b_addr;
1518 save_node = save_blk->bp->b_addr;
1519 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
1520 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
1521 drop_btree = drop_hdr.btree;
1522 save_btree = save_hdr.btree;
1523 tp = state->args->trans;
1526 * If the dying block has lower hashvals, then move all the
1527 * elements in the remaining block up to make a hole.
1529 if ((be32_to_cpu(drop_btree[0].hashval) <
1530 be32_to_cpu(save_btree[0].hashval)) ||
1531 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1532 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1533 /* XXX: check this - is memmove dst correct? */
1534 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1535 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1538 xfs_trans_log_buf(tp, save_blk->bp,
1539 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1540 (save_hdr.count + drop_hdr.count) *
1541 sizeof(xfs_da_node_entry_t)));
1543 sindex = save_hdr.count;
1544 xfs_trans_log_buf(tp, save_blk->bp,
1545 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1546 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1550 * Move all the B-tree elements from drop_blk to save_blk.
1552 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1553 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1554 save_hdr.count += drop_hdr.count;
1556 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
1557 xfs_trans_log_buf(tp, save_blk->bp,
1558 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1559 state->args->geo->node_hdr_size));
1562 * Save the last hashval in the remaining block for upward propagation.
1564 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1567 /*========================================================================
1568 * Routines used for finding things in the Btree.
1569 *========================================================================*/
1572 * Walk down the Btree looking for a particular filename, filling
1573 * in the state structure as we go.
1575 * We will set the state structure to point to each of the elements
1576 * in each of the nodes where either the hashval is or should be.
1578 * We support duplicate hashval's so for each entry in the current
1579 * node that could contain the desired hashval, descend. This is a
1580 * pruned depth-first tree search.
1583 xfs_da3_node_lookup_int(
1584 struct xfs_da_state *state,
1587 struct xfs_da_state_blk *blk;
1588 struct xfs_da_blkinfo *curr;
1589 struct xfs_da_intnode *node;
1590 struct xfs_da_node_entry *btree;
1591 struct xfs_da3_icnode_hdr nodehdr;
1592 struct xfs_da_args *args;
1594 xfs_dahash_t hashval;
1595 xfs_dahash_t btreehashval;
1601 unsigned int expected_level = 0;
1603 struct xfs_inode *dp = state->args->dp;
1608 * Descend thru the B-tree searching each level for the right
1609 * node to use, until the right hashval is found.
1611 blkno = args->geo->leafblk;
1612 for (blk = &state->path.blk[0], state->path.active = 1;
1613 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1614 blk++, state->path.active++) {
1616 * Read the next node down in the tree.
1619 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1620 &blk->bp, args->whichfork);
1623 state->path.active--;
1626 curr = blk->bp->b_addr;
1627 magic = be16_to_cpu(curr->magic);
1629 if (magic == XFS_ATTR_LEAF_MAGIC ||
1630 magic == XFS_ATTR3_LEAF_MAGIC) {
1631 blk->magic = XFS_ATTR_LEAF_MAGIC;
1632 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1636 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1637 magic == XFS_DIR3_LEAFN_MAGIC) {
1638 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1639 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1644 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
1645 xfs_buf_mark_corrupt(blk->bp);
1646 return -EFSCORRUPTED;
1649 blk->magic = XFS_DA_NODE_MAGIC;
1652 * Search an intermediate node for a match.
1654 node = blk->bp->b_addr;
1655 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1656 btree = nodehdr.btree;
1658 /* Tree taller than we can handle; bail out! */
1659 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
1660 xfs_buf_mark_corrupt(blk->bp);
1661 return -EFSCORRUPTED;
1664 /* Check the level from the root. */
1665 if (blkno == args->geo->leafblk)
1666 expected_level = nodehdr.level - 1;
1667 else if (expected_level != nodehdr.level) {
1668 xfs_buf_mark_corrupt(blk->bp);
1669 return -EFSCORRUPTED;
1673 max = nodehdr.count;
1674 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1677 * Binary search. (note: small blocks will skip loop)
1679 probe = span = max / 2;
1680 hashval = args->hashval;
1683 btreehashval = be32_to_cpu(btree[probe].hashval);
1684 if (btreehashval < hashval)
1686 else if (btreehashval > hashval)
1691 ASSERT((probe >= 0) && (probe < max));
1692 ASSERT((span <= 4) ||
1693 (be32_to_cpu(btree[probe].hashval) == hashval));
1696 * Since we may have duplicate hashval's, find the first
1697 * matching hashval in the node.
1700 be32_to_cpu(btree[probe].hashval) >= hashval) {
1703 while (probe < max &&
1704 be32_to_cpu(btree[probe].hashval) < hashval) {
1709 * Pick the right block to descend on.
1712 blk->index = max - 1;
1713 blkno = be32_to_cpu(btree[max - 1].before);
1716 blkno = be32_to_cpu(btree[probe].before);
1719 /* We can't point back to the root. */
1720 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
1721 return -EFSCORRUPTED;
1724 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
1725 return -EFSCORRUPTED;
1728 * A leaf block that ends in the hashval that we are interested in
1729 * (final hashval == search hashval) means that the next block may
1730 * contain more entries with the same hashval, shift upward to the
1731 * next leaf and keep searching.
1734 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1735 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1736 &blk->index, state);
1737 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1738 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1739 blk->index = args->index;
1740 args->blkno = blk->blkno;
1743 return -EFSCORRUPTED;
1745 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1746 (blk->hashval == args->hashval)) {
1747 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1753 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1754 /* path_shift() gives ENOENT */
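/*
 * Illustrative sketch (not part of this file): the span-halving probe used in
 * the lookup above, on a plain sorted array of hash values.  After the span
 * collapses, the probe is nudged back to the first entry whose hash is >= the
 * search value, so runs of duplicate hashvals are entered at their start.
 * Names are hypothetical.
 */
static int demo_hash_probe(const unsigned int *hashes, int max,
			   unsigned int want)
{
	int probe, span;

	probe = span = max / 2;
	while (span > 4) {
		span /= 2;
		if (hashes[probe] < want)
			probe += span;
		else if (hashes[probe] > want)
			probe -= span;
		else
			break;
	}
	/* back up to the first duplicate, then skip anything still too small */
	while (probe > 0 && hashes[probe] >= want)
		probe--;
	while (probe < max && hashes[probe] < want)
		probe++;
	return probe;		/* == max means every entry hashes below want */
}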
1764 /*========================================================================
1766 *========================================================================*/
1769 * Compare two intermediate nodes for "order".
1773 struct xfs_inode *dp,
1774 struct xfs_buf *node1_bp,
1775 struct xfs_buf *node2_bp)
1777 struct xfs_da_intnode *node1;
1778 struct xfs_da_intnode *node2;
1779 struct xfs_da_node_entry *btree1;
1780 struct xfs_da_node_entry *btree2;
1781 struct xfs_da3_icnode_hdr node1hdr;
1782 struct xfs_da3_icnode_hdr node2hdr;
1784 node1 = node1_bp->b_addr;
1785 node2 = node2_bp->b_addr;
1786 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
1787 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
1788 btree1 = node1hdr.btree;
1789 btree2 = node2hdr.btree;
1791 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1792 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1793 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1794 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1801 * Link a new block into a doubly linked list of blocks (of whatever type).
1805 struct xfs_da_state *state,
1806 struct xfs_da_state_blk *old_blk,
1807 struct xfs_da_state_blk *new_blk)
1809 struct xfs_da_blkinfo *old_info;
1810 struct xfs_da_blkinfo *new_info;
1811 struct xfs_da_blkinfo *tmp_info;
1812 struct xfs_da_args *args;
1816 struct xfs_inode *dp = state->args->dp;
1819 * Set up environment.
1822 ASSERT(args != NULL);
1823 old_info = old_blk->bp->b_addr;
1824 new_info = new_blk->bp->b_addr;
1825 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1826 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1827 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1829 switch (old_blk->magic) {
1830 case XFS_ATTR_LEAF_MAGIC:
1831 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1833 case XFS_DIR2_LEAFN_MAGIC:
1834 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1836 case XFS_DA_NODE_MAGIC:
1837 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1842 * Link blocks in appropriate order.
1846 * Link new block in before existing block.
1848 trace_xfs_da_link_before(args);
1849 new_info->forw = cpu_to_be32(old_blk->blkno);
1850 new_info->back = old_info->back;
1851 if (old_info->back) {
1852 error = xfs_da3_node_read(args->trans, dp,
1853 be32_to_cpu(old_info->back),
1854 &bp, args->whichfork);
1858 tmp_info = bp->b_addr;
1859 ASSERT(tmp_info->magic == old_info->magic);
1860 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1861 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1862 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1864 old_info->back = cpu_to_be32(new_blk->blkno);
1867 * Link new block in after existing block.
1869 trace_xfs_da_link_after(args);
1870 new_info->forw = old_info->forw;
1871 new_info->back = cpu_to_be32(old_blk->blkno);
1872 if (old_info->forw) {
1873 error = xfs_da3_node_read(args->trans, dp,
1874 be32_to_cpu(old_info->forw),
1875 &bp, args->whichfork);
1879 tmp_info = bp->b_addr;
1880 ASSERT(tmp_info->magic == old_info->magic);
1881 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1882 tmp_info->back = cpu_to_be32(new_blk->blkno);
1883 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1885 old_info->forw = cpu_to_be32(new_blk->blkno);
1888 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1889 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1894 * Unlink a block from a doubly linked list of blocks.
1896 STATIC int /* error */
1898 struct xfs_da_state *state,
1899 struct xfs_da_state_blk *drop_blk,
1900 struct xfs_da_state_blk *save_blk)
1902 struct xfs_da_blkinfo *drop_info;
1903 struct xfs_da_blkinfo *save_info;
1904 struct xfs_da_blkinfo *tmp_info;
1905 struct xfs_da_args *args;
1910 * Set up environment.
1913 ASSERT(args != NULL);
1914 save_info = save_blk->bp->b_addr;
1915 drop_info = drop_blk->bp->b_addr;
1916 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1917 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1918 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1919 ASSERT(save_blk->magic == drop_blk->magic);
1920 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1921 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1922 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1923 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1926 * Unlink the leaf block from the doubly linked chain of leaves.
1928 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1929 trace_xfs_da_unlink_back(args);
1930 save_info->back = drop_info->back;
1931 if (drop_info->back) {
1932 error = xfs_da3_node_read(args->trans, args->dp,
1933 be32_to_cpu(drop_info->back),
1934 &bp, args->whichfork);
1938 tmp_info = bp->b_addr;
1939 ASSERT(tmp_info->magic == save_info->magic);
1940 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1941 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1942 xfs_trans_log_buf(args->trans, bp, 0,
1943 sizeof(*tmp_info) - 1);
1946 trace_xfs_da_unlink_forward(args);
1947 save_info->forw = drop_info->forw;
1948 if (drop_info->forw) {
1949 error = xfs_da3_node_read(args->trans, args->dp,
1950 be32_to_cpu(drop_info->forw),
1951 &bp, args->whichfork);
1955 tmp_info = bp->b_addr;
1956 ASSERT(tmp_info->magic == save_info->magic);
1957 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1958 tmp_info->back = cpu_to_be32(save_blk->blkno);
1959 xfs_trans_log_buf(args->trans, bp, 0,
1960 sizeof(*tmp_info) - 1);
1964 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
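/*
 * Illustrative sketch (not part of this file): the sibling unlink above on an
 * ordinary in-memory doubly linked node.  On disk the "pointers" are forw/back
 * block numbers (0 meaning none) and every touched block must also be logged,
 * but the pointer surgery is the same.  Names ("demo_*") are hypothetical.
 */
#include <stddef.h>

struct demo_blk {
	struct demo_blk	*forw;	/* next sibling, higher hashvals */
	struct demo_blk	*back;	/* previous sibling, lower hashvals */
};

static void demo_blk_unlink(struct demo_blk *drop)
{
	if (drop->back)
		drop->back->forw = drop->forw;	/* left sibling skips us */
	if (drop->forw)
		drop->forw->back = drop->back;	/* right sibling skips us */
	drop->forw = drop->back = NULL;
}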
1969 * Move a path "forward" or "!forward" one block at the current level.
1971 * This routine will adjust a "path" to point to the next block
1972 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1973 * Btree, including updating pointers to the intermediate nodes between
1974 * the new bottom and the root.
1978 struct xfs_da_state *state,
1979 struct xfs_da_state_path *path,
1984 struct xfs_da_state_blk *blk;
1985 struct xfs_da_blkinfo *info;
1986 struct xfs_da_args *args;
1987 struct xfs_da_node_entry *btree;
1988 struct xfs_da3_icnode_hdr nodehdr;
1990 xfs_dablk_t blkno = 0;
1993 struct xfs_inode *dp = state->args->dp;
1995 trace_xfs_da_path_shift(state->args);
1998 * Roll up the Btree looking for the first block where our
1999 * current index is not at the edge of the block. Note that
2000 * we skip the bottom layer because we want the sibling block.
2003 ASSERT(args != NULL);
2004 ASSERT(path != NULL);
2005 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
2006 level = (path->active-1) - 1; /* skip bottom layer in path */
2007 for (; level >= 0; level--) {
2008 blk = &path->blk[level];
2009 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2012 if (forward && (blk->index < nodehdr.count - 1)) {
2014 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2016 } else if (!forward && (blk->index > 0)) {
2018 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2023 *result = -ENOENT; /* we're out of our tree */
2024 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
2029 * Roll down the edge of the subtree until we reach the
2030 * same depth we were at originally.
2032 for (blk++, level++; level < path->active; blk++, level++) {
2034 * Read the next child block into a local buffer.
2036 error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
2042 * Release the old block (if it's dirty, the trans doesn't
2043 * actually let go) and swap the local buffer into the path
2044 * structure. This ensures failure of the above read doesn't set
2045 * a NULL buffer in an active slot in the path.
2048 xfs_trans_brelse(args->trans, blk->bp);
2052 info = blk->bp->b_addr;
2053 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
2054 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
2055 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2056 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
2057 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
2058 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
2062 * Note: we flatten the magic number to a single type so we
2063 * don't have to compare against crc/non-crc types elsewhere.
2065 switch (be16_to_cpu(info->magic)) {
2066 case XFS_DA_NODE_MAGIC:
2067 case XFS_DA3_NODE_MAGIC:
2068 blk->magic = XFS_DA_NODE_MAGIC;
2069 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2071 btree = nodehdr.btree;
2072 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2076 blk->index = nodehdr.count - 1;
2077 blkno = be32_to_cpu(btree[blk->index].before);
2079 case XFS_ATTR_LEAF_MAGIC:
2080 case XFS_ATTR3_LEAF_MAGIC:
2081 blk->magic = XFS_ATTR_LEAF_MAGIC;
2082 ASSERT(level == path->active-1);
2084 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
2086 case XFS_DIR2_LEAFN_MAGIC:
2087 case XFS_DIR3_LEAFN_MAGIC:
2088 blk->magic = XFS_DIR2_LEAFN_MAGIC;
2089 ASSERT(level == path->active-1);
2091 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2104 /*========================================================================
2106 *========================================================================*/
2109 * Implement a simple hash on a character string.
2110 * Rotate the hash value by 7 bits, then XOR each character in.
2111 * This is implemented with some source-level loop unrolling.
2114 xfs_da_hashname(const uint8_t *name, int namelen)
2119 * Do four characters at a time as long as we can.
2121 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2122 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2123 (name[3] << 0) ^ rol32(hash, 7 * 4);
2126 * Now do the rest of the characters.
2130 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2133 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2135 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2136 default: /* case 0: */
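/*
 * Illustrative sketch (not part of this file): xfs_da_hashname() above is a
 * four-characters-at-a-time unrolling of the per-character recurrence below,
 * i.e. "rotate the running hash left by 7 bits, then XOR the next byte in".
 * rol32() is open-coded here since this is a hypothetical userspace sketch;
 * the shift is always 7, so the rotate never hits the undefined 0/32 cases.
 */
#include <stdint.h>

static uint32_t demo_rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

static uint32_t demo_hashname(const uint8_t *name, int namelen)
{
	uint32_t hash = 0;

	for (; namelen > 0; namelen--, name++)
		hash = *name ^ demo_rol32(hash, 7);
	return hash;
}

/* usage: demo_hashname((const uint8_t *)"foo", 3) matches the unrolled form */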
2143 struct xfs_da_args *args,
2144 const unsigned char *name,
2147 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2148 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2152 xfs_da_grow_inode_int(
2153 struct xfs_da_args *args,
2157 struct xfs_trans *tp = args->trans;
2158 struct xfs_inode *dp = args->dp;
2159 int w = args->whichfork;
2160 xfs_rfsblock_t nblks = dp->i_nblocks;
2161 struct xfs_bmbt_irec map, *mapp;
2162 int nmap, error, got, i, mapi;
2165 * Find a spot in the file space to put the new block.
2167 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2172 * Try mapping it in one filesystem block.
2175 error = xfs_bmapi_write(tp, dp, *bno, count,
2176 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2177 args->total, &map, &nmap);
2185 } else if (nmap == 0 && count > 1) {
2190 * If we didn't get it and the block might work if fragmented,
2191 * try without the CONTIG flag. Loop until we get it all.
2193 mapp = kmem_alloc(sizeof(*mapp) * count, 0);
2194 for (b = *bno, mapi = 0; b < *bno + count; ) {
2195 nmap = min(XFS_BMAP_MAX_NMAP, count);
2196 c = (int)(*bno + count - b);
2197 error = xfs_bmapi_write(tp, dp, b, c,
2198 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2199 args->total, &mapp[mapi], &nmap);
2205 b = mapp[mapi - 1].br_startoff +
2206 mapp[mapi - 1].br_blockcount;
2214 * Count the blocks we got, make sure it matches the total.
2216 for (i = 0, got = 0; i < mapi; i++)
2217 got += mapp[i].br_blockcount;
2218 if (got != count || mapp[0].br_startoff != *bno ||
2219 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2225 /* account for newly allocated blocks in reserved blocks total */
2226 args->total -= dp->i_nblocks - nblks;
2235 * Add a block to the btree ahead of the file.
2236 * Return the new block number to the caller.
2240 struct xfs_da_args *args,
2241 xfs_dablk_t *new_blkno)
2246 trace_xfs_da_grow_inode(args);
2248 bno = args->geo->leafblk;
2249 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2251 *new_blkno = (xfs_dablk_t)bno;
2256 * Ick. We need to always be able to remove a btree block, even
2257 * if there's no space reservation because the filesystem is full.
2258 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2259 * It swaps the target block with the last block in the file. The
2260 * last block in the file can always be removed since it can't cause
2261 * a bmap btree split to do that.
static int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, lastoff == 0))
		return -EFSCORRUPTED;
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;

	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
					    dead_leaf2);
		ents = leafhdr.ents;
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
		btree = deadhdr.btree;
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->forw) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (XFS_IS_CORRUPT(mp,
				   be32_to_cpu(sib_info->back) != last_blkno ||
				   sib_info->magic != dead_info->magic)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp,
				   level >= 0 && level != par_hdr.level + 1)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = par_hdr.btree;
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
		if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = par_hdr.btree;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
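
/*
 * Illustrative sketch (hypothetical helper, not one of the real callers):
 * a typical user frees da block 0 once an attr leaf has been folded back
 * into shortform; the buffer is invalidated in the transaction by
 * xfs_da_shrink_inode() whether or not the unmap itself succeeds.
 */
static inline int
xfs_da_example_remove_leaf_block(
	struct xfs_da_args	*args,
	struct xfs_buf		*bp)
{
	return xfs_da_shrink_inode(args, 0, bp);
}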

static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	struct xfs_buf_map	**mapp,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb = xfs_dabuf_nfsb(mp, whichfork);
	struct xfs_bmbt_irec	irec, *irecs = &irec;
	struct xfs_buf_map	*map = *mapp;
	xfs_fileoff_t		off = bno;
	int			error = 0, nirecs, i;

	if (nfsb > 1)
		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);

	nirecs = nfsb;
	error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
			xfs_bmapi_aflag(whichfork));
	if (error)
		goto out_free_irecs;

	/*
	 * Use the caller provided map for the single map case, else allocate a
	 * larger one that needs to be freed by the caller.
	 */
	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
		if (!map) {
			error = -ENOMEM;
			goto out_free_irecs;
		}
		*mapp = map;
	}

	for (i = 0; i < nirecs; i++) {
		if (irecs[i].br_startblock == HOLESTARTBLOCK ||
		    irecs[i].br_startblock == DELAYSTARTBLOCK)
			goto invalid_mapping;
		if (off != irecs[i].br_startoff)
			goto invalid_mapping;

		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
		off += irecs[i].br_blockcount;
	}

	if (off != bno + nfsb)
		goto invalid_mapping;
	*nmaps = nirecs;

out_free_irecs:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;

invalid_mapping:
	/* Caller ok with no mapping. */
	if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
		error = -EFSCORRUPTED;
		if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
			xfs_alert(mp, "%s: bno %u inode %llu",
					__func__, bno, dp->i_ino);

			for (i = 0; i < nirecs; i++) {
				xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
					i, irecs[i].br_startoff,
					irecs[i].br_startblock,
					irecs[i].br_blockcount,
					irecs[i].br_state);
			}
		}
	} else {
		*nmaps = 0;
	}
	goto out_free_irecs;
}
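
/*
 * Illustrative sketch (hypothetical helper): every user of xfs_dabuf_map()
 * follows the same pattern -- pass a single on-stack xfs_buf_map, let the
 * function substitute a larger allocation when the da block turns out to
 * be discontiguous, and free that allocation when done.
 */
static inline int
xfs_da_example_count_mappings(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	int			whichfork,
	int			*nmap)
{
	struct xfs_buf_map	map, *mapp = &map;
	int			error;

	error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, nmap);
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}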

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
	if (error || nmap == 0)
		goto out_free;

	error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	struct xfs_buf_map	map, *mapp = &map;
	int			nmap = 1;
	int			error;

	*bpp = NULL;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
			&bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

/*
 * Readahead the dir/attr block.
 */
void
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
	if (error || !nmap)
		goto out_free;

	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);
}
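
/*
 * Illustrative sketch (hypothetical helper; the block choice and verifier
 * are examples only): readahead is purely advisory, so callers fire it at
 * blocks they expect to need soon and ignore the outcome.
 */
static inline void
xfs_da_example_reada_root(
	struct xfs_da_args	*args)
{
	/* hint that the dabtree root block will be read shortly */
	xfs_da_reada_buf(args->dp, args->geo->leafblk, 0, args->whichfork,
			&xfs_da3_node_buf_ops);
}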