1 #include <linux/module.h>
4 #include "print-tree.h"
5 #include "transaction.h"
7 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
8 *orig_root, u64 num_blocks, u64 search_start, u64
9 search_end, struct btrfs_key *ins, int data);
10 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
11 btrfs_root *extent_root);
12 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
13 btrfs_root *extent_root);
/*
 * Pick a block group to allocate from.
 *
 * Prefers the caller's hinted group while it is under ~2/3 full; when the
 * hint is too full its AVAIL tag is cleared and the search resumes just
 * past it.  Two radix-tree scans follow: first only groups tagged
 * BTRFS_BLOCK_GROUP_AVAIL (2/3-full cutoff), then every group (any free
 * space at all).  The winner is cached in info->block_group_cache and
 * returned; groups found full along the way lose their AVAIL tag.
 *
 * NOTE(review): this excerpt is missing lines (loop bodies, continue/break
 * statements, the full_search retry path), so the exact control flow
 * between the two scans cannot be confirmed here.
 */
15 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
16 struct btrfs_block_group_cache
19 struct btrfs_block_group_cache *cache[8];
20 struct btrfs_block_group_cache *found_group = NULL;
21 struct btrfs_fs_info *info = root->fs_info;
/* If the hinted group is getting full, stop advertising it as AVAIL
 * and start the scan after it. */
29 used = btrfs_block_group_used(&hint->item);
30 if (used < (hint->key.offset * 2) / 3) {
33 radix_tree_tag_clear(&info->block_group_radix,
34 hint->key.objectid + hint->key.offset - 1,
35 BTRFS_BLOCK_GROUP_AVAIL);
36 last = hint->key.objectid + hint->key.offset;
/* Pass 1: only groups still tagged as having room. */
43 ret = radix_tree_gang_lookup_tag(&info->block_group_radix,
45 last, ARRAY_SIZE(cache),
46 BTRFS_BLOCK_GROUP_AVAIL);
49 for (i = 0; i < ret; i++) {
50 last = cache[i]->key.objectid +
/* objectid & offset appears to encode data vs. metadata group
 * parity -- TODO confirm against the group-creation code. */
52 if (!full_search && !data &&
53 (cache[i]->key.objectid & cache[i]->key.offset))
55 if (!full_search && data &&
56 (cache[i]->key.objectid & cache[i]->key.offset) == 0)
58 used = btrfs_block_group_used(&cache[i]->item);
59 if (used < (cache[i]->key.offset * 2) / 3) {
60 info->block_group_cache = cache[i];
61 found_group = cache[i];
/* Full group: drop its AVAIL tag so pass 1 skips it next time. */
64 radix_tree_tag_clear(&info->block_group_radix,
65 cache[i]->key.objectid +
66 cache[i]->key.offset - 1,
67 BTRFS_BLOCK_GROUP_AVAIL);
/* Pass 2: scan all groups, accepting any with free space
 * (used < offset rather than the 2/3 cutoff). */
73 ret = radix_tree_gang_lookup(&info->block_group_radix,
75 last, ARRAY_SIZE(cache));
78 for (i = 0; i < ret; i++) {
79 last = cache[i]->key.objectid +
81 if (!full_search && !data &&
82 (cache[i]->key.objectid & cache[i]->key.offset))
84 if (!full_search && data &&
85 (cache[i]->key.objectid & cache[i]->key.offset) == 0)
87 used = btrfs_block_group_used(&cache[i]->item);
88 if (used < cache[i]->key.offset) {
89 info->block_group_cache = cache[i];
90 found_group = cache[i];
93 radix_tree_tag_clear(&info->block_group_radix,
94 cache[i]->key.objectid +
95 cache[i]->key.offset - 1,
96 BTRFS_BLOCK_GROUP_AVAIL);
/* Nothing suitable found: invalidate the cache and fall back to the
 * first group in the radix tree. */
99 info->block_group_cache = NULL;
107 ret = radix_tree_gang_lookup(&info->block_group_radix,
108 (void **)&found_group, 0, 1);
/*
 * Bump the reference count of the extent item covering
 * [blocknr, blocknr + num_blocks) in the extent tree.
 *
 * The leading find_free_extent() call with num_blocks == 0 preallocates
 * room for extent-tree inserts before the tree is modified -- presumably
 * to avoid recursion when the extent tree itself needs new blocks; verify
 * against find_free_extent's fill_prealloc path.
 *
 * NOTE(review): lines are missing from this excerpt (error returns,
 * closing braces), so the failure paths are not fully visible.
 */
114 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
115 struct btrfs_root *root,
116 u64 blocknr, u64 num_blocks)
118 struct btrfs_path *path;
120 struct btrfs_key key;
121 struct btrfs_leaf *l;
122 struct btrfs_extent_item *item;
123 struct btrfs_key ins;
/* Preallocate extent-tree blocks before we dirty the extent tree. */
126 find_free_extent(trans, root->fs_info->extent_root, 0, 0, (u64)-1,
128 path = btrfs_alloc_path();
130 btrfs_init_path(path);
131 key.objectid = blocknr;
133 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
134 key.offset = num_blocks;
135 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
138 printk("can't find block %Lu %Lu\n", blocknr, num_blocks);
/* Found the extent item: increment refs in place and dirty the leaf. */
142 l = btrfs_buffer_leaf(path->nodes[0]);
143 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
144 refs = btrfs_extent_refs(item);
145 btrfs_set_extent_refs(item, refs + 1);
146 btrfs_mark_buffer_dirty(path->nodes[0]);
148 btrfs_release_path(root->fs_info->extent_root, path);
149 btrfs_free_path(path);
/* Flush any extent-tree inserts/deletes queued while we held the path. */
150 finish_current_insert(trans, root->fs_info->extent_root);
151 del_pending_extents(trans, root->fs_info->extent_root);
/*
 * Read the reference count of the extent item for
 * [blocknr, blocknr + num_blocks) into *refs.
 *
 * Read-only helper: searches the extent tree, copies the refs field out,
 * and releases the path.  NOTE(review): the error handling between the
 * search and the leaf access is not visible in this excerpt.
 */
155 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
156 struct btrfs_root *root, u64 blocknr,
157 u64 num_blocks, u32 *refs)
159 struct btrfs_path *path;
161 struct btrfs_key key;
162 struct btrfs_leaf *l;
163 struct btrfs_extent_item *item;
165 path = btrfs_alloc_path();
166 btrfs_init_path(path);
167 key.objectid = blocknr;
168 key.offset = num_blocks;
170 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
171 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
175 l = btrfs_buffer_leaf(path->nodes[0]);
176 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
/* Hand the on-disk ref count back to the caller. */
177 *refs = btrfs_extent_refs(item);
178 btrfs_release_path(root->fs_info->extent_root, path);
179 btrfs_free_path(path);
/*
 * Take an extra reference on the single block at the root of 'root'.
 * Thin wrapper over btrfs_inc_extent_ref() for the tree's root node.
 */
183 int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
184 struct btrfs_root *root)
186 return btrfs_inc_extent_ref(trans, root, bh_blocknr(root->node), 1);
/*
 * Add one reference to everything 'buf' points at.
 *
 * For a leaf: walk its items and bump the ref of each non-inline file
 * extent's disk blocks.  For an interior node: bump the ref of every
 * child block pointer.  Used when a block is COWed/shared so the things
 * it references survive either copy being freed.
 *
 * NOTE(review): the branch joining the leaf and node cases, and the
 * ret-checking after each btrfs_inc_extent_ref call, are not visible in
 * this excerpt.
 */
189 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
190 struct buffer_head *buf)
193 struct btrfs_node *buf_node;
194 struct btrfs_leaf *buf_leaf;
195 struct btrfs_disk_key *key;
196 struct btrfs_file_extent_item *fi;
203 buf_node = btrfs_buffer_node(buf);
204 leaf = btrfs_is_leaf(buf_node);
205 buf_leaf = btrfs_buffer_leaf(buf);
206 for (i = 0; i < btrfs_header_nritems(&buf_node->header); i++) {
/* Leaf case: only EXTENT_DATA items reference disk blocks, and
 * inline extents store their data in the leaf itself. */
208 key = &buf_leaf->items[i].key;
209 if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
211 fi = btrfs_item_ptr(buf_leaf, i,
212 struct btrfs_file_extent_item);
213 if (btrfs_file_extent_type(fi) ==
214 BTRFS_FILE_EXTENT_INLINE)
216 ret = btrfs_inc_extent_ref(trans, root,
217 btrfs_file_extent_disk_blocknr(fi),
218 btrfs_file_extent_disk_num_blocks(fi));
/* Node case: each slot holds a pointer to one child block. */
221 blocknr = btrfs_node_blockptr(buf_node, i);
222 ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
/*
 * Write a single in-memory block group item back to the extent tree.
 *
 * Preallocates extent-tree space, looks up the group's item by key,
 * memcpy()s the cached item over the on-disk copy, dirties the leaf,
 * and flushes queued extent-tree inserts/deletes.
 */
229 static int write_one_cache_group(struct btrfs_trans_handle *trans,
230 struct btrfs_root *root,
231 struct btrfs_path *path,
232 struct btrfs_block_group_cache *cache)
236 struct btrfs_root *extent_root = root->fs_info->extent_root;
237 struct btrfs_block_group_item *bi;
238 struct btrfs_key ins;
/* num_blocks == 0: just refill the extent-tree preallocation. */
240 find_free_extent(trans, extent_root, 0, 0, (u64)-1, &ins, 0);
241 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
243 bi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
244 struct btrfs_block_group_item);
245 memcpy(bi, &cache->item, sizeof(*bi));
246 mark_buffer_dirty(path->nodes[0]);
247 btrfs_release_path(extent_root, path);
249 finish_current_insert(trans, extent_root);
250 pending_ret = del_pending_extents(trans, extent_root);
/*
 * Flush every block group tagged BTRFS_BLOCK_GROUP_DIRTY back to disk.
 *
 * Gang-looks-up dirty groups (8 at a time), clears each group's dirty
 * tag, writes it via write_one_cache_group(), and resets last_alloc to
 * first_free.  NOTE(review): the outer retry loop and the err handling
 * after write_one_cache_group are not visible in this excerpt.
 */
259 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
260 struct btrfs_root *root)
262 struct btrfs_block_group_cache *cache[8];
266 struct radix_tree_root *radix = &root->fs_info->block_group_radix;
268 struct btrfs_path *path;
270 path = btrfs_alloc_path();
275 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
276 0, ARRAY_SIZE(cache),
277 BTRFS_BLOCK_GROUP_DIRTY);
280 for (i = 0; i < ret; i++) {
/* Clear the tag first; groups are indexed by their last block. */
281 radix_tree_tag_clear(radix, cache[i]->key.objectid +
282 cache[i]->key.offset - 1,
283 BTRFS_BLOCK_GROUP_DIRTY);
284 err = write_one_cache_group(trans, root,
/* Reset the allocation cursor for the next transaction. */
288 cache[i]->last_alloc = cache[i]->first_free;
291 btrfs_free_path(path);
/*
 * Adjust the 'used' count of the block group(s) covering
 * [blocknr, blocknr + num) after an allocation (alloc != 0) or free.
 *
 * Looks up the owning group via the radix tree (groups are indexed by
 * their last block, so a lookup at blocknr finds the covering group),
 * marks it dirty, clamps num to the group boundary, and updates the
 * allocation hints (last_alloc on alloc, first_free on free).
 *
 * NOTE(review): the loop over 'total' that walks multiple groups, and
 * the old_val +/- num arithmetic, are not visible in this excerpt.
 */
295 static int update_block_group(struct btrfs_trans_handle *trans,
296 struct btrfs_root *root,
297 u64 blocknr, u64 num, int alloc)
299 struct btrfs_block_group_cache *cache;
300 struct btrfs_fs_info *info = root->fs_info;
306 ret = radix_tree_gang_lookup(&info->block_group_radix,
307 (void **)&cache, blocknr, 1);
309 printk(KERN_CRIT "blocknr %Lu lookup failed\n",
313 block_in_group = blocknr - cache->key.objectid;
314 WARN_ON(block_in_group > cache->key.offset);
/* Any used-count change makes the group item dirty on disk. */
315 radix_tree_tag_set(&info->block_group_radix,
316 cache->key.objectid + cache->key.offset - 1,
317 BTRFS_BLOCK_GROUP_DIRTY);
319 old_val = btrfs_block_group_used(&cache->item);
/* Don't account past the end of this group. */
320 num = min(total, cache->key.offset - block_in_group);
325 if (blocknr > cache->last_alloc)
326 cache->last_alloc = blocknr;
329 if (blocknr < cache->first_free)
330 cache->first_free = blocknr;
332 btrfs_set_block_group_used(&cache->item, old_val);
/*
 * Best-effort drop of a single page-cache page at 'index'; returns the
 * number of pages invalidated (0 if the page was busy or absent).
 */
337 static int try_remove_page(struct address_space *mapping, unsigned long index)
340 ret = invalidate_mapping_pages(mapping, index, index);
/*
 * After a commit, unpin every block recorded in the pinned radix and try
 * to drop its page from the btree inode's page cache.
 *
 * Blocks are pinned while a transaction may still reference their old
 * contents; once the commit is on disk they can be reused.  The shift
 * converts a block number to a page index using the inode's blocksize.
 * NOTE(review): the loop that repeats find_first_radix_bit until the
 * radix is empty is not visible in this excerpt.
 */
344 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
347 unsigned long gang[8];
348 struct inode *btree_inode = root->fs_info->btree_inode;
352 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
355 ret = find_first_radix_bit(pinned_radix, gang,
361 for (i = 0; i < ret; i++) {
362 clear_radix_bit(pinned_radix, gang[i]);
363 try_remove_page(btree_inode->i_mapping,
364 gang[i] << (PAGE_CACHE_SHIFT -
365 btree_inode->i_blkbits));
/*
 * Insert the extent items queued in fs_info->extent_tree_insert[].
 *
 * Blocks handed out for the extent tree itself cannot insert their own
 * extent items at allocation time (that would recurse into the tree
 * being modified), so they are queued and written here.  Each item gets
 * refs == 1, owner == the extent root, and bumps the superblock's
 * blocks_used by one.  Both the insert and prealloc queues are emptied.
 */
371 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
372 btrfs_root *extent_root)
374 struct btrfs_key ins;
375 struct btrfs_extent_item extent_item;
378 u64 super_blocks_used;
379 struct btrfs_fs_info *info = extent_root->fs_info;
381 btrfs_set_extent_refs(&extent_item, 1);
384 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
385 btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
387 for (i = 0; i < extent_root->fs_info->extent_tree_insert_nr; i++) {
388 ins.objectid = extent_root->fs_info->extent_tree_insert[i];
/* Each queued block is a single-block extent. */
389 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
390 btrfs_set_super_blocks_used(info->disk_super,
391 super_blocks_used + 1);
392 ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
393 sizeof(extent_item));
/* Both queues are consumed; reset for the next batch. */
396 extent_root->fs_info->extent_tree_insert_nr = 0;
397 extent_root->fs_info->extent_tree_prealloc_nr = 0;
/*
 * Mark a block so it is not reused until it is safe.
 *
 * With pending == 0 the block goes straight into the pinned radix (held
 * until the commit finishes); with pending != 0 it goes into the
 * pending-del radix for del_pending_extents() to process later.  The
 * buffer_head check appears to compare the block's generation with the
 * running transaction's transid -- presumably blocks born in the current
 * transaction can be handled differently; the conditional's body is not
 * visible in this excerpt, so confirm before relying on it.
 */
401 static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
404 struct btrfs_header *header;
405 struct buffer_head *bh;
408 bh = btrfs_find_tree_block(root, blocknr);
410 if (buffer_uptodate(bh)) {
412 root->fs_info->running_transaction->transid;
413 header = btrfs_buffer_header(bh);
414 if (btrfs_header_generation(header) ==
416 btrfs_block_release(root, bh);
420 btrfs_block_release(root, bh);
422 err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
424 err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
* remove an extent from the root, returns 0 on success
/*
 * Drop one reference on [blocknr, blocknr + num_blocks); when the count
 * reaches zero, optionally pin the block, decrement the superblock's
 * blocks_used, delete the extent item, and update the owning block
 * group.  Internal helper -- callers go through btrfs_free_extent().
 *
 * NOTE(review): the refs == 0 branch and several error checks are only
 * partially visible in this excerpt.
 */
433 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
434 *root, u64 blocknr, u64 num_blocks, int pin)
436 struct btrfs_path *path;
437 struct btrfs_key key;
438 struct btrfs_fs_info *info = root->fs_info;
439 struct btrfs_root *extent_root = info->extent_root;
441 struct btrfs_extent_item *ei;
442 struct btrfs_key ins;
445 key.objectid = blocknr;
447 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
448 key.offset = num_blocks;
/* Refill extent-tree preallocation before modifying the extent tree. */
450 find_free_extent(trans, root, 0, 0, (u64)-1, &ins, 0);
451 path = btrfs_alloc_path();
453 btrfs_init_path(path);
455 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
457 printk("failed to find %Lu\n", key.objectid);
458 btrfs_print_tree(extent_root, extent_root->node);
459 printk("failed to find %Lu\n", key.objectid);
462 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
463 struct btrfs_extent_item);
/* Freeing an extent that already has zero refs is corruption. */
464 BUG_ON(ei->refs == 0);
465 refs = btrfs_extent_refs(ei) - 1;
466 btrfs_set_extent_refs(ei, refs);
467 btrfs_mark_buffer_dirty(path->nodes[0]);
/* Last reference gone: reclaim the blocks. */
469 u64 super_blocks_used;
472 ret = pin_down_block(root, blocknr, 0);
476 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
477 btrfs_set_super_blocks_used(info->disk_super,
478 super_blocks_used - num_blocks);
479 ret = btrfs_del_item(trans, extent_root, path);
482 ret = update_block_group(trans, root, blocknr, num_blocks, 0);
485 btrfs_release_path(extent_root, path);
486 btrfs_free_path(path);
487 finish_current_insert(trans, extent_root);
* find all the blocks marked as pending in the radix tree and remove
* them from the extent map
/*
 * Drain the pending-del radix: each pending block is moved to the pinned
 * radix, its pending bit cleared, and its extent freed (already-pinned,
 * so __free_extent is called with pin == 0 here -- TODO confirm the
 * final argument against the missing line).  NOTE(review): the outer
 * loop that repeats until the radix is empty is not visible.
 */
495 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
496 btrfs_root *extent_root)
501 unsigned long gang[4];
503 struct radix_tree_root *pending_radix;
504 struct radix_tree_root *pinned_radix;
506 pending_radix = &extent_root->fs_info->pending_del_radix;
507 pinned_radix = &extent_root->fs_info->pinned_radix;
510 ret = find_first_radix_bit(pending_radix, gang,
514 for (i = 0; i < ret; i++) {
/* Pin first, then drop the pending bit, then free the extent. */
515 wret = set_radix_bit(pinned_radix, gang[i]);
517 wret = clear_radix_bit(pending_radix, gang[i]);
519 wret = __free_extent(trans, extent_root,
* remove an extent from the root, returns 0 on success
/*
 * Public entry for freeing an extent.  Frees of extent-tree blocks are
 * deferred via pin_down_block(..., pending=1) to avoid re-entering the
 * extent tree; everything else goes straight to __free_extent(), after
 * which any pending deletes are drained.
 */
531 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
532 *root, u64 blocknr, u64 num_blocks, int pin)
534 struct btrfs_root *extent_root = root->fs_info->extent_root;
538 if (root == extent_root) {
539 pin_down_block(root, blocknr, 1);
542 ret = __free_extent(trans, root, blocknr, num_blocks, pin);
543 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
/* First failure wins; otherwise report the deferred-delete result. */
544 return ret ? ret : pending_ret;
* walks the btree of allocated extents and find a hole of a given size.
* The key ins is changed to record the hole:
* ins->objectid == block start
* ins->flags = BTRFS_EXTENT_ITEM_KEY
* ins->offset == number of blocks
* Any available blocks before search_start are skipped.
/*
 * Core allocator.  Two modes:
 *  - num_blocks > 0: find a hole of that size in the extent tree and
 *    return it through *ins.
 *  - num_blocks == 0 (fill_prealloc): refill the extent-tree
 *    preallocation array sized from the tree height.
 * Candidate holes are rejected if they overlap pinned blocks, queued
 * extent-tree inserts, or the existing preallocation, and the search
 * restarts past the conflict.  The chosen group's last_alloc cursor and
 * trans->block_group are updated on success.
 *
 * NOTE(review): this excerpt is missing many lines (goto labels, the
 * check_fail/error paths, several loop bodies); the annotations below
 * describe only what is visible.
 */
555 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
556 *orig_root, u64 num_blocks, u64 search_start, u64
557 search_end, struct btrfs_key *ins, int data)
559 struct btrfs_path *path;
560 struct btrfs_key key;
567 struct btrfs_leaf *l;
/* All allocations are tracked in the extent root, whichever tree asked. */
568 struct btrfs_root * root = orig_root->fs_info->extent_root;
569 struct btrfs_fs_info *info = root->fs_info;
570 int total_needed = num_blocks;
572 int fill_prealloc = 0;
574 struct btrfs_block_group_cache *block_group;
576 path = btrfs_alloc_path();
578 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
580 level = btrfs_header_level(btrfs_buffer_header(root->node));
/* num_blocks == 0 switches to prealloc mode; the amount scales with
 * tree height (worst-case blocks a single extent-tree update needs). */
581 if (num_blocks == 0) {
584 total_needed = (min(level + 1, BTRFS_MAX_LEVEL) + 2) * 3;
/* Start from the preferred block group's allocation cursor. */
586 block_group = btrfs_find_block_group(root, trans->block_group, data);
587 if (block_group->last_alloc > search_start)
588 search_start = block_group->last_alloc;
590 btrfs_init_path(path);
591 ins->objectid = search_start;
594 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
598 if (path->slots[0] > 0)
/* Walk leaf items looking for a gap between consecutive extents. */
602 l = btrfs_buffer_leaf(path->nodes[0]);
603 slot = path->slots[0];
604 if (slot >= btrfs_header_nritems(&l->header)) {
606 info->extent_tree_prealloc_nr = 0;
609 ret = btrfs_next_leaf(root, path);
/* Ran past the last extent item: everything after is free. */
615 ins->objectid = search_start;
616 ins->offset = (u64)-1 - search_start;
620 ins->objectid = last_block > search_start ?
621 last_block : search_start;
622 ins->offset = (u64)-1 - ins->objectid;
625 btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
626 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
628 if (key.objectid >= search_start) {
630 if (last_block < search_start)
631 last_block = search_start;
/* Gap between the previous extent's end and this one's start. */
632 hole_size = key.objectid - last_block;
633 if (hole_size >= num_blocks) {
634 ins->objectid = last_block;
635 ins->offset = hole_size;
641 last_block = key.objectid + key.offset;
647 /* we have to make sure we didn't find an extent that has already
648 * been allocated by the map tree or the original allocation
650 btrfs_release_path(root, path);
651 BUG_ON(ins->objectid < search_start);
652 if (ins->objectid >= btrfs_super_total_blocks(info->disk_super)) {
653 if (search_start == 0)
/* Reject holes overlapping pinned blocks; restart past them. */
658 for (test_block = ins->objectid;
659 test_block < ins->objectid + num_blocks; test_block++) {
660 if (test_radix_bit(&info->pinned_radix, test_block)) {
661 search_start = test_block + 1;
/* Reject holes overlapping queued extent-tree inserts. */
665 if (!fill_prealloc && info->extent_tree_insert_nr) {
667 info->extent_tree_insert[info->extent_tree_insert_nr - 1];
668 if (ins->objectid + num_blocks >
669 info->extent_tree_insert[0] &&
670 ins->objectid <= last) {
671 search_start = last + 1;
/* Reject holes overlapping the existing preallocation window. */
676 if (!fill_prealloc && info->extent_tree_prealloc_nr) {
678 info->extent_tree_prealloc[info->extent_tree_prealloc_nr - 1];
679 if (ins->objectid + num_blocks > first &&
680 ins->objectid <= info->extent_tree_prealloc[0]) {
681 search_start = info->extent_tree_prealloc[0] + 1;
/* Prealloc mode: record individual free blocks into the array. */
688 test_block = ins->objectid;
689 while(test_block < ins->objectid + ins->offset &&
690 total_found < total_needed) {
691 nr = total_needed - total_found - 1;
693 info->extent_tree_prealloc[nr] = test_block;
697 if (total_found < total_needed) {
698 search_start = test_block;
701 info->extent_tree_prealloc_nr = total_found;
/* Remember which group we allocated from for the next allocation. */
703 ret = radix_tree_gang_lookup(&info->block_group_radix,
704 (void **)&block_group,
707 block_group->last_alloc = ins->objectid;
709 trans->block_group = block_group;
711 ins->offset = num_blocks;
712 btrfs_free_path(path);
/* Error path: drop the path before freeing it. */
715 btrfs_release_path(root, path);
716 btrfs_free_path(path);
* finds a free extent and does all the dirty work required for allocation
* returns the key for the extent through ins, and a tree buffer for
* the first block of the extent through buf.
* returns 0 if everything worked, non-zero otherwise.
/*
 * Allocate an extent and record it in the extent tree.
 *
 * Extent-tree allocations (root == extent_root) must be single blocks
 * and are served from the preallocation array, with the extent item
 * insertion queued (extent_tree_insert) for finish_current_insert() --
 * inserting immediately would recurse into the tree being modified.
 * All other roots do a real find_free_extent(), then prealloc for the
 * extent tree, insert the extent item (refs=1, owner), bump the
 * superblock block count, and update the block group accounting.
 */
726 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
727 struct btrfs_root *root, u64 owner,
728 u64 num_blocks, u64 search_start,
729 u64 search_end, struct btrfs_key *ins, int data)
733 u64 super_blocks_used;
734 struct btrfs_fs_info *info = root->fs_info;
735 struct btrfs_root *extent_root = info->extent_root;
736 struct btrfs_extent_item extent_item;
737 struct btrfs_key prealloc_key;
739 btrfs_set_extent_refs(&extent_item, 1);
740 btrfs_set_extent_owner(&extent_item, owner);
/* Extent-tree self-allocation: pop a preallocated block and queue
 * its extent item for later insertion. */
742 if (root == extent_root) {
744 BUG_ON(info->extent_tree_prealloc_nr == 0);
745 BUG_ON(num_blocks != 1);
747 info->extent_tree_prealloc_nr--;
748 nr = info->extent_tree_prealloc_nr;
749 ins->objectid = info->extent_tree_prealloc[nr];
750 info->extent_tree_insert[info->extent_tree_insert_nr++] =
752 ret = update_block_group(trans, root,
753 ins->objectid, ins->offset, 1);
757 /* do the real allocation */
758 ret = find_free_extent(trans, root, num_blocks, search_start,
759 search_end, ins, data);
763 /* then do prealloc for the extent tree */
764 ret = find_free_extent(trans, root, 0, ins->objectid + ins->offset,
765 search_end, &prealloc_key, 0);
769 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
770 btrfs_set_super_blocks_used(info->disk_super, super_blocks_used +
772 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
773 sizeof(extent_item));
774 finish_current_insert(trans, extent_root);
776 pending_ret = del_pending_extents(trans, extent_root);
781 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
* helper function to allocate a block for a given tree
* returns the tree buffer or NULL.
/*
 * Allocate one metadata block for 'root' and return its buffer_head,
 * marked uptodate/checked and added to the transaction's dirty-page
 * radix.  NOTE(review): the 'hint' parameter is not used in the lines
 * visible here -- search_start is passed as 0; confirm against the
 * missing lines before relying on hint semantics.
 */
789 struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
790 struct btrfs_root *root, u64 hint)
792 struct btrfs_key ins;
794 struct buffer_head *buf;
796 ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
797 1, 0, (unsigned long)-1, &ins, 0);
803 buf = btrfs_find_create_tree_block(root, ins.objectid);
/* Freshly allocated block: contents will be written, never read. */
804 set_buffer_uptodate(buf);
805 set_buffer_checked(buf);
806 set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
/*
 * Drop one reference on every on-disk file extent referenced by the
 * leaf 'cur'.  Inline extents carry their data inside the leaf and are
 * skipped.  Used while tearing down a snapshot.
 */
810 static int drop_leaf_ref(struct btrfs_trans_handle *trans,
811 struct btrfs_root *root, struct buffer_head *cur)
813 struct btrfs_disk_key *key;
814 struct btrfs_leaf *leaf;
815 struct btrfs_file_extent_item *fi;
820 BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur)));
821 leaf = btrfs_buffer_leaf(cur);
822 nritems = btrfs_header_nritems(&leaf->header);
823 for (i = 0; i < nritems; i++) {
824 key = &leaf->items[i].key;
825 if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
827 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
828 if (btrfs_file_extent_type(fi) == BTRFS_FILE_EXTENT_INLINE)
831 * FIXME make sure to insert a trans record that
832 * repeats the snapshot del on crash
834 ret = btrfs_free_extent(trans, root,
835 btrfs_file_extent_disk_blocknr(fi),
836 btrfs_file_extent_disk_num_blocks(fi),
* helper function for drop_snapshot, this walks down the tree dropping ref
/*
 * Descend from path->nodes[*level], dropping references as we go.
 *
 * At each node slot: if the child has extra references (refs != 1) we
 * just drop one ref and advance; if it is the last reference we read
 * the child and descend into it.  Leaves get drop_leaf_ref().  On the
 * way out, the node itself is freed and removed from the path.  *level
 * is updated to the current depth.
 *
 * NOTE(review): the conditions around refs checks and the loop/return
 * structure are only partially visible in this excerpt.
 */
847 static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
848 *root, struct btrfs_path *path, int *level)
850 struct buffer_head *next;
851 struct buffer_head *cur;
857 WARN_ON(*level >= BTRFS_MAX_LEVEL);
858 ret = lookup_extent_ref(trans, root, bh_blocknr(path->nodes[*level]),
864 * walk down to the last node level and free all the leaves
868 WARN_ON(*level >= BTRFS_MAX_LEVEL);
869 cur = path->nodes[*level];
870 if (btrfs_header_level(btrfs_buffer_header(cur)) != *level)
872 if (path->slots[*level] >=
873 btrfs_header_nritems(btrfs_buffer_header(cur)))
/* Leaf reached: drop the file-extent refs it holds. */
876 ret = drop_leaf_ref(trans, root, cur);
880 blocknr = btrfs_node_blockptr(btrfs_buffer_node(cur),
881 path->slots[*level]);
882 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
/* Shared child: drop our ref and move to the next slot without
 * descending. */
885 path->slots[*level]++;
886 ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
/* Last reference: read the child and descend into it. */
890 next = read_tree_block(root, blocknr);
891 WARN_ON(*level <= 0);
892 if (path->nodes[*level-1])
893 btrfs_block_release(root, path->nodes[*level-1]);
894 path->nodes[*level-1] = next;
895 *level = btrfs_header_level(btrfs_buffer_header(next));
896 path->slots[*level] = 0;
900 WARN_ON(*level >= BTRFS_MAX_LEVEL);
901 ret = btrfs_free_extent(trans, root,
902 bh_blocknr(path->nodes[*level]), 1, 1);
903 btrfs_block_release(root, path->nodes[*level]);
904 path->nodes[*level] = NULL;
* helper for dropping snapshots. This walks back up the tree in the path
* to find the first node higher up where we haven't yet gone through
/*
 * Ascend the path until a node still has unvisited slots (slot not at
 * the last item); fully-consumed nodes are freed (their extent ref
 * dropped, pinned) and cleared from the path.  Returns control to
 * walk_down_tree for the next subtree.  NOTE(review): the return values
 * and slot-advance statement are not visible in this excerpt.
 */
915 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
916 *root, struct btrfs_path *path, int *level)
921 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
922 slot = path->slots[i];
923 if (slot < btrfs_header_nritems(
924 btrfs_buffer_header(path->nodes[i])) - 1) {
/* This node is exhausted: free its block and pop it. */
929 ret = btrfs_free_extent(trans, root,
930 bh_blocknr(path->nodes[*level]),
933 btrfs_block_release(root, path->nodes[*level]);
934 path->nodes[*level] = NULL;
* drop the reference count on the tree rooted at 'snap'. This traverses
* the tree freeing any blocks that have a ref count of zero after being
/*
 * Tear down the tree rooted at 'snap': alternate walk_down_tree /
 * walk_up_tree until the whole tree has been visited, periodically
 * balancing dirty btree pages, then release any buffers still held in
 * the path.  NOTE(review): the loop structure and wret handling between
 * the two walks are not visible in this excerpt.
 */
946 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
947 *root, struct buffer_head *snap)
952 struct btrfs_path *path;
956 path = btrfs_alloc_path();
958 btrfs_init_path(path);
960 level = btrfs_header_level(btrfs_buffer_header(snap));
962 path->nodes[level] = snap;
963 path->slots[level] = 0;
965 wret = walk_down_tree(trans, root, path, &level);
971 wret = walk_up_tree(trans, root, path, &level);
/* Keep dirty metadata from piling up during a long teardown. */
976 btrfs_btree_balance_dirty(root);
978 for (i = 0; i <= orig_level; i++) {
979 if (path->nodes[i]) {
980 btrfs_block_release(root, path->nodes[i]);
983 btrfs_free_path(path);
/*
 * Unmount-time teardown: remove every block group cache entry from the
 * radix tree (groups are indexed by their last block).  NOTE(review):
 * the gang-lookup loop bounds and the kfree of each entry are not
 * visible in this excerpt -- confirm entries are freed, not just
 * unlinked.
 */
987 int btrfs_free_block_groups(struct btrfs_fs_info *info)
990 struct btrfs_block_group_cache *cache[8];
994 ret = radix_tree_gang_lookup(&info->block_group_radix,
999 for (i = 0; i < ret; i++) {
1000 radix_tree_delete(&info->block_group_radix,
1001 cache[i]->key.objectid +
1002 cache[i]->key.offset - 1);
/*
 * Mount-time load of all block group items from the extent tree into
 * the in-memory block_group_radix.
 *
 * Iterates BLOCK_GROUP_ITEM keys, allocates a cache entry per group,
 * copies the on-disk item and key, initializes the allocation cursors,
 * inserts it into the radix (keyed by the group's last block), and tags
 * groups under ~2/3 full as AVAIL.  Stops once past the filesystem's
 * total block count.  NOTE(review): error handling for the kmalloc and
 * radix_tree_insert failures is not visible in this excerpt.
 */
1009 int btrfs_read_block_groups(struct btrfs_root *root)
1011 struct btrfs_path *path;
1014 struct btrfs_block_group_item *bi;
1015 struct btrfs_block_group_cache *cache;
1016 struct btrfs_key key;
1017 struct btrfs_key found_key;
1018 struct btrfs_leaf *leaf;
/* Groups are a fixed on-disk size, expressed here in blocks. */
1019 u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->blocksize;
1022 root = root->fs_info->extent_root;
1024 key.offset = group_size_blocks;
1026 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1028 path = btrfs_alloc_path();
1033 ret = btrfs_search_slot(NULL, root->fs_info->extent_root,
1039 leaf = btrfs_buffer_leaf(path->nodes[0]);
1040 btrfs_disk_key_to_cpu(&found_key,
1041 &leaf->items[path->slots[0]].key);
1042 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1047 bi = btrfs_item_ptr(leaf, path->slots[0],
1048 struct btrfs_block_group_item);
1049 memcpy(&cache->item, bi, sizeof(*bi));
1050 memcpy(&cache->key, &found_key, sizeof(found_key));
/* Fresh group: both cursors start at the group's first block. */
1051 cache->last_alloc = cache->key.objectid;
1052 cache->first_free = cache->key.objectid;
1053 key.objectid = found_key.objectid + found_key.offset;
1054 btrfs_release_path(root, path);
/* Radix index is the group's LAST block number. */
1055 ret = radix_tree_insert(&root->fs_info->block_group_radix,
1056 found_key.objectid +
1057 found_key.offset - 1,
/* Advertise groups with room (same 2/3 cutoff the allocator uses). */
1060 used = btrfs_block_group_used(bi);
1061 if (used < (key.offset * 2) / 3) {
1062 radix_tree_tag_set(&root->fs_info->block_group_radix,
1063 found_key.objectid +
1064 found_key.offset - 1,
1065 BTRFS_BLOCK_GROUP_AVAIL);
1068 btrfs_super_total_blocks(root->fs_info->disk_super))
1072 btrfs_free_path(path);