1 #include <linux/module.h>
4 #include "print-tree.h"
5 #include "transaction.h"
/*
 * Forward declarations: the extent allocator and the deferred
 * insert/delete flushers are used before they are defined below.
 * NOTE(review): this listing has lines elided (embedded original line
 * numbers are non-contiguous); function bodies below are partial views.
 */
7 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
8 *orig_root, u64 num_blocks, u64 search_start, u64
9 search_end, struct btrfs_key *ins, int data);
10 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
11 btrfs_root *extent_root);
12 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
13 btrfs_root *extent_root);
/*
 * Issue readahead for up to 8 tree blocks pointed to by the node at
 * path level 1, starting one slot past the current position and
 * stopping once a key objectid exceeds 'limit'.  Best-effort only:
 * the readahead return value is not acted on in the visible lines.
 * NOTE(review): declarations and the early-exit branch are elided here.
 */
15 static void reada_extent_leaves(struct btrfs_root *root,
16 struct btrfs_path *path, u64 limit)
18 struct btrfs_node *node;
28 node = btrfs_buffer_node(path->nodes[1]);
29 slot = path->slots[1] + 1;
30 nritems = btrfs_header_nritems(&node->header);
/* at most 8 blocks of readahead per call */
31 for (i = slot; i < nritems && i < slot + 8; i++) {
32 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
33 if (item_objectid > limit)
35 blocknr = btrfs_node_blockptr(node, i);
36 ret = readahead_tree_block(root, blocknr);
/*
 * Walk the extent tree over one block group and record every free
 * block in fs_info->extent_map_radix (one radix bit per free block).
 * Holes between/after extent items become set bits; the group is then
 * marked cached so repeat calls can return early.  Data block groups
 * appear to be skipped (see the ->data test) -- only metadata groups
 * get the free-extent bitmap; TODO confirm against the elided lines.
 */
42 static int cache_block_group(struct btrfs_root *root,
43 struct btrfs_block_group_cache *block_group)
45 struct btrfs_path *path;
48 struct btrfs_leaf *leaf;
49 struct radix_tree_root *extent_radix;
/* all searches below go through the extent tree, not the caller's root */
57 root = root->fs_info->extent_root;
58 extent_radix = &root->fs_info->extent_map_radix;
/* already cached (or a data group): nothing to do in the elided branch */
60 if (block_group->cached)
62 if (block_group->data)
64 path = btrfs_alloc_path();
67 printk("cache block group %Lu\n", block_group->key.objectid);
68 key.objectid = block_group->key.objectid;
71 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
72 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
/* back up one slot so we start at the item preceding the group start */
75 if (ret && path->slots[0] > 0)
77 limit = block_group->key.objectid + block_group->key.offset;
78 reada_extent_leaves(root, path, limit);
80 leaf = btrfs_buffer_leaf(path->nodes[0]);
81 slot = path->slots[0];
/* ran off the current leaf: read ahead and advance to the next one */
82 if (slot >= btrfs_header_nritems(&leaf->header)) {
83 reada_extent_leaves(root, path, limit);
84 ret = btrfs_next_leaf(root, path);
/* no further items: everything from 'last' to group end is a hole */
89 hole_size = block_group->key.objectid +
90 block_group->key.offset - last;
92 last = block_group->key.objectid;
93 hole_size = block_group->key.offset;
95 for (i = 0; i < hole_size; i++) {
96 set_radix_bit(extent_radix,
102 btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
/* item is past this group: mark the trailing hole and stop */
103 if (key.objectid >= block_group->key.objectid +
104 block_group->key.offset) {
106 hole_size = block_group->key.objectid +
107 block_group->key.offset - last;
109 last = block_group->key.objectid;
110 hole_size = block_group->key.offset;
112 for (i = 0; i < hole_size; i++) {
113 set_radix_bit(extent_radix, last + i);
/* extent item: blocks between 'last' and this extent are free */
117 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
119 last = key.objectid + key.offset;
122 hole_size = key.objectid - last;
123 for (i = 0; i < hole_size; i++) {
124 set_radix_bit(extent_radix, last + i);
126 last = key.objectid + key.offset;
132 block_group->cached = 1;
133 btrfs_free_path(path);
/*
 * Find the block group cache entry containing 'blocknr'.  Tries the
 * metadata radix first, then the data radix; each gang lookup returns
 * the first group at or after blocknr and the range check confirms
 * containment.  The printk tail is the failure path (no group found).
 * NOTE(review): the range test uses <= on the exclusive end
 * (objectid + offset) -- looks off-by-one-tolerant by design or by
 * accident; confirm against the full original before relying on it.
 */
137 static struct btrfs_block_group_cache *lookup_block_group(struct
141 struct btrfs_block_group_cache *block_group;
144 ret = radix_tree_gang_lookup(&info->block_group_radix,
145 (void **)&block_group,
148 if (block_group->key.objectid <= blocknr && blocknr <=
149 block_group->key.objectid + block_group->key.offset)
/* not in a metadata group: retry against the data group radix */
152 ret = radix_tree_gang_lookup(&info->block_group_data_radix,
153 (void **)&block_group,
156 if (block_group->key.objectid <= blocknr && blocknr <=
157 block_group->key.objectid + block_group->key.offset)
161 printk("lookup_block_group fails for blocknr %Lu\n", blocknr);
162 printk("last ret was %d\n", ret);
164 printk("last block group was %Lu %Lu\n", block_group->key.objectid, block_group->key.offset);
/*
 * Number of extent items that fit in one leaf: leaf data area divided
 * by the size of an extent item plus its item header.  Used as a
 * "roughly one leaf worth" distance heuristic by find_search_start().
 */
169 static u64 leaf_range(struct btrfs_root *root)
171 u64 size = BTRFS_LEAF_DATA_SIZE(root);
172 size = size / (sizeof(struct btrfs_extent_item) +
173 sizeof(struct btrfs_item));
/*
 * Pick a starting block number for an allocation of 'num' blocks,
 * using the cached free-extent bitmap of *cache_ret.  Scans forward
 * from max(search_start, group start, last_prealloc) for runs of free
 * bits; when the current group is exhausted it moves to the next group
 * (updating *cache_ret, presumably -- the assignment is elided).
 * Fallback paths return max(last_alloc, search_start).
 * NOTE(review): heavily elided -- loop structure and several exits are
 * not visible; comments here describe only the visible statements.
 */
177 static u64 find_search_start(struct btrfs_root *root,
178 struct btrfs_block_group_cache **cache_ret,
179 u64 search_start, int num)
181 unsigned long gang[8];
183 struct btrfs_block_group_cache *cache = *cache_ret;
184 u64 last = max(search_start, cache->key.objectid);
189 last = max(last, cache->last_prealloc);
/* make sure this group's free map is populated before scanning it */
192 cache_block_group(root, cache);
194 ret = find_first_radix_bit(&root->fs_info->extent_map_radix,
195 gang, last, ARRAY_SIZE(gang));
198 last = gang[ret-1] + 1;
/* fewer than a full gang: near the end of the mapped free space */
200 if (ret != ARRAY_SIZE(gang)) {
/* free bits too scattered (wider than ~one leaf): keep scanning */
203 if (gang[ret-1] - gang[0] > leaf_range(root)) {
207 if (gang[0] >= cache->key.objectid + cache->key.offset) {
213 return max(cache->last_alloc, search_start);
216 cache = lookup_block_group(root->fs_info, last + cache->key.offset - 1);
218 return max((*cache_ret)->last_alloc, search_start);
220 cache = btrfs_find_block_group(root, cache,
221 last + cache->key.offset - 1, 0, 0);
/*
 * Choose a block group to allocate from.  Preference order visible
 * here: a group containing search_start with matching data/metadata
 * type and enough room; then the caller's hint group; then a tagged
 * (BTRFS_BLOCK_GROUP_AVAIL) group scan; then an untagged scan; and
 * finally a bail-out that grabs the first group from either radix.
 * Groups that are >= 80% full get their AVAIL tag cleared as a side
 * effect.  'factor' scales the fullness threshold (offset*factor/10).
 * NOTE(review): the radix/swap_radix selection and several loop exits
 * are elided; 'data' selects the data vs metadata radix.
 */
226 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
227 struct btrfs_block_group_cache
228 *hint, u64 search_start,
231 struct btrfs_block_group_cache *cache[8];
232 struct btrfs_block_group_cache *found_group = NULL;
233 struct btrfs_fs_info *info = root->fs_info;
234 struct radix_tree_root *radix;
235 struct radix_tree_root *swap_radix;
/* pick the primary radix by allocation type; the other is the fallback */
249 radix = &info->block_group_data_radix;
250 swap_radix = &info->block_group_radix;
252 radix = &info->block_group_radix;
253 swap_radix = &info->block_group_data_radix;
257 struct btrfs_block_group_cache *shint;
258 shint = lookup_block_group(info, search_start);
/* group containing search_start wins if its type matches and it has room */
259 if (shint->data == data) {
260 used = btrfs_block_group_used(&shint->item);
261 if (used + shint->pinned <
262 (shint->key.offset * factor) / 10) {
267 if (hint && hint->data == data) {
268 used = btrfs_block_group_used(&hint->item);
269 if (used + hint->pinned < (hint->key.offset * factor) / 10) {
/* hint group is >= 80% full: drop its AVAIL tag */
272 if (used >= (hint->key.offset * 8) / 10) {
273 radix_tree_tag_clear(radix,
275 hint->key.offset - 1,
276 BTRFS_BLOCK_GROUP_AVAIL);
/* start the scan a few group-sizes before the hint when possible */
278 last = hint->key.offset * 3;
279 if (hint->key.objectid >= last)
280 last = max(search_start + hint->key.offset - 1,
281 hint->key.objectid - last);
283 last = hint->key.objectid + hint->key.offset;
287 hint_last = max(hint->key.objectid, search_start);
289 hint_last = search_start;
/* pass 1: only groups tagged as having free space */
294 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
295 last, ARRAY_SIZE(cache),
296 BTRFS_BLOCK_GROUP_AVAIL);
299 for (i = 0; i < ret; i++) {
300 last = cache[i]->key.objectid +
301 cache[i]->key.offset;
302 used = btrfs_block_group_used(&cache[i]->item);
303 if (used + cache[i]->pinned <
304 (cache[i]->key.offset * factor) / 10) {
305 found_group = cache[i];
308 if (used >= (cache[i]->key.offset * 8) / 10) {
309 radix_tree_tag_clear(radix,
310 cache[i]->key.objectid +
311 cache[i]->key.offset - 1,
312 BTRFS_BLOCK_GROUP_AVAIL);
/* pass 2: any group at all, with a plain fullness test */
320 ret = radix_tree_gang_lookup(radix, (void **)cache,
321 last, ARRAY_SIZE(cache));
324 for (i = 0; i < ret; i++) {
325 last = cache[i]->key.objectid +
326 cache[i]->key.offset;
327 used = btrfs_block_group_used(&cache[i]->item);
328 if (used + cache[i]->pinned < cache[i]->key.offset) {
329 found_group = cache[i];
332 if (used >= cache[i]->key.offset) {
333 radix_tree_tag_clear(radix,
334 cache[i]->key.objectid +
335 cache[i]->key.offset - 1,
336 BTRFS_BLOCK_GROUP_AVAIL);
/* swap primary and fallback radixes and retry (elided loop control) */
347 struct radix_tree_root *tmp = radix;
/* last resort: take the very first group from either radix */
355 printk("find block group bailing to zero data %d\n", data);
356 ret = radix_tree_gang_lookup(radix,
357 (void **)&found_group, 0, 1);
359 ret = radix_tree_gang_lookup(swap_radix,
360 (void **)&found_group,
/*
 * Increment the reference count on an extent item in the extent tree.
 * A preparatory find_free_extent(..., 0, ...) call keeps the extent
 * tree's preallocation topped up before the COW that the search may
 * trigger; after the update, deferred inserts/deletes are flushed.
 * The printk path fires when the extent item cannot be found.
 * NOTE(review): error-return lines are elided from this view.
 */
369 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
370 struct btrfs_root *root,
371 u64 blocknr, u64 num_blocks)
373 struct btrfs_path *path;
375 struct btrfs_key key;
376 struct btrfs_leaf *l;
377 struct btrfs_extent_item *item;
378 struct btrfs_key ins;
/* num_blocks == 0: refill the extent-tree prealloc only */
381 find_free_extent(trans, root->fs_info->extent_root, 0, 0, (u64)-1,
383 path = btrfs_alloc_path();
385 btrfs_init_path(path);
386 key.objectid = blocknr;
388 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
389 key.offset = num_blocks;
390 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
393 printk("can't find block %Lu %Lu\n", blocknr, num_blocks);
397 l = btrfs_buffer_leaf(path->nodes[0]);
398 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
399 refs = btrfs_extent_refs(item);
400 btrfs_set_extent_refs(item, refs + 1);
401 btrfs_mark_buffer_dirty(path->nodes[0]);
403 btrfs_release_path(root->fs_info->extent_root, path);
404 btrfs_free_path(path);
/* flush any extent inserts/deletes deferred during this update */
405 finish_current_insert(trans, root->fs_info->extent_root);
406 del_pending_extents(trans, root->fs_info->extent_root);
/*
 * Read the reference count of the extent item [blocknr, num_blocks)
 * into *refs.  Read-only lookup: nothing is dirtied, and the path is
 * released and freed before returning.
 */
410 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
411 struct btrfs_root *root, u64 blocknr,
412 u64 num_blocks, u32 *refs)
414 struct btrfs_path *path;
416 struct btrfs_key key;
417 struct btrfs_leaf *l;
418 struct btrfs_extent_item *item;
420 path = btrfs_alloc_path();
421 btrfs_init_path(path);
422 key.objectid = blocknr;
423 key.offset = num_blocks;
425 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
426 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
430 l = btrfs_buffer_leaf(path->nodes[0]);
431 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
432 *refs = btrfs_extent_refs(item);
433 btrfs_release_path(root->fs_info->extent_root, path);
434 btrfs_free_path(path);
/*
 * Convenience wrapper: take one extra reference on the single block
 * that is the root node of 'root'.
 */
438 int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
439 struct btrfs_root *root)
441 return btrfs_inc_extent_ref(trans, root, bh_blocknr(root->node), 1);
/*
 * Take an extra reference on everything 'buf' points to.  For a leaf:
 * one ref per on-disk file extent (inline extents and blocknr 0 holes
 * are skipped).  For an interior node: one ref per child block.
 * NOTE(review): the leaf/node branch structure is partly elided; the
 * 'leaf' flag presumably gates which of the two loops runs -- confirm.
 */
444 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
445 struct buffer_head *buf)
448 struct btrfs_node *buf_node;
449 struct btrfs_leaf *buf_leaf;
450 struct btrfs_disk_key *key;
451 struct btrfs_file_extent_item *fi;
458 buf_node = btrfs_buffer_node(buf);
459 leaf = btrfs_is_leaf(buf_node);
460 buf_leaf = btrfs_buffer_leaf(buf);
461 for (i = 0; i < btrfs_header_nritems(&buf_node->header); i++) {
464 key = &buf_leaf->items[i].key;
/* only EXTENT_DATA items carry disk extents worth referencing */
465 if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
467 fi = btrfs_item_ptr(buf_leaf, i,
468 struct btrfs_file_extent_item);
/* inline extents live in the leaf itself: nothing to ref */
469 if (btrfs_file_extent_type(fi) ==
470 BTRFS_FILE_EXTENT_INLINE)
/* blocknr 0 marks a hole: skip */
472 disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
473 if (disk_blocknr == 0)
475 ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
476 btrfs_file_extent_disk_num_blocks(fi));
479 blocknr = btrfs_node_blockptr(buf_node, i);
480 ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
/*
 * Write one in-memory block group item back to the extent tree: find
 * the item by the cache's key, overwrite it with cache->item, dirty
 * the buffer, then flush deferred inserts/deletes.  The leading
 * find_free_extent(..., 0, ...) keeps the extent-tree prealloc filled
 * before the search may COW.  last_alloc is reset to first_free,
 * presumably after a successful write -- the branch is elided.
 */
487 static int write_one_cache_group(struct btrfs_trans_handle *trans,
488 struct btrfs_root *root,
489 struct btrfs_path *path,
490 struct btrfs_block_group_cache *cache)
494 struct btrfs_root *extent_root = root->fs_info->extent_root;
495 struct btrfs_block_group_item *bi;
496 struct btrfs_key ins;
498 find_free_extent(trans, extent_root, 0, 0, (u64)-1, &ins, 0);
499 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
501 bi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
502 struct btrfs_block_group_item);
503 memcpy(bi, &cache->item, sizeof(*bi));
504 mark_buffer_dirty(path->nodes[0]);
505 btrfs_release_path(extent_root, path);
507 finish_current_insert(trans, extent_root);
508 pending_ret = del_pending_extents(trans, extent_root);
514 cache->last_alloc = cache->first_free;
/*
 * Flush every block group tagged BTRFS_BLOCK_GROUP_DIRTY in 'radix':
 * gang-lookup dirty groups, clear each tag, and write the group item
 * via write_one_cache_group().  Groups are indexed in the radix by
 * their last block (objectid + offset - 1).
 * NOTE(review): the loop-until-empty control flow is elided.
 */
519 static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
520 struct btrfs_root *root,
521 struct radix_tree_root *radix)
523 struct btrfs_block_group_cache *cache[8];
528 struct btrfs_path *path;
530 path = btrfs_alloc_path();
535 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
536 0, ARRAY_SIZE(cache),
537 BTRFS_BLOCK_GROUP_DIRTY);
540 for (i = 0; i < ret; i++) {
541 radix_tree_tag_clear(radix, cache[i]->key.objectid +
542 cache[i]->key.offset - 1,
543 BTRFS_BLOCK_GROUP_DIRTY);
544 err = write_one_cache_group(trans, root,
550 btrfs_free_path(path);
/*
 * Flush dirty block group items from both the metadata and the data
 * block group radixes back to the extent tree.
 */
554 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
555 struct btrfs_root *root)
559 ret = write_dirty_block_radix(trans, root,
560 &root->fs_info->block_group_radix);
561 ret2 = write_dirty_block_radix(trans, root,
562 &root->fs_info->block_group_data_radix);
/*
 * Account an allocation (alloc != 0) or a free of 'num' blocks at
 * 'blocknr' against its block group: adjust the used counter, mark the
 * group dirty, keep last_alloc/first_free hints current, and maintain
 * the free-extent bitmap (clear bits on alloc, set on free when
 * mark_free).  A group whose type no longer matches the allocation
 * ('data' mismatch) while less than half used is migrated between the
 * data and metadata radixes.  Groups crossing below half-used regain
 * the AVAIL tag.
 * NOTE(review): the outer while-loop over multiple groups and the
 * old_val += / -= lines are elided; comments cover visible lines only.
 */
570 static int update_block_group(struct btrfs_trans_handle *trans,
571 struct btrfs_root *root,
572 u64 blocknr, u64 num, int alloc, int mark_free,
575 struct btrfs_block_group_cache *cache;
576 struct btrfs_fs_info *info = root->fs_info;
584 cache = lookup_block_group(info, blocknr);
586 printk(KERN_CRIT "blocknr %Lu lookup failed\n",
590 block_in_group = blocknr - cache->key.objectid;
591 WARN_ON(block_in_group > cache->key.offset);
/* group item must be rewritten at commit: tag it dirty */
592 radix_tree_tag_set(cache->radix, cache->key.objectid +
593 cache->key.offset - 1,
594 BTRFS_BLOCK_GROUP_DIRTY);
596 old_val = btrfs_block_group_used(&cache->item);
/* clamp this pass to the blocks remaining in the current group */
597 num = min(total, cache->key.offset - block_in_group);
599 if (blocknr > cache->last_alloc)
600 cache->last_alloc = blocknr;
/* allocation: these blocks are no longer free */
602 for (i = 0; i < num; i++) {
603 clear_radix_bit(&info->extent_map_radix,
/* type flip: move a mostly-empty group to the other radix */
607 if (cache->data != data &&
608 old_val < cache->key.offset / 2) {
609 printk("changing block group %Lu from %d to %d\n", cache->key.objectid, cache->data, data);
611 radix_tree_delete(cache->radix,
612 cache->key.objectid +
613 cache->key.offset - 1);
617 &info->block_group_data_radix;
619 BTRFS_BLOCK_GROUP_DATA;
621 cache->radix = &info->block_group_radix;
623 ~BTRFS_BLOCK_GROUP_DATA;
625 ret = radix_tree_insert(cache->radix,
626 cache->key.objectid +
627 cache->key.offset - 1,
/* free path: push hints back and re-mark blocks free if requested */
633 if (blocknr < cache->first_free)
634 cache->first_free = blocknr;
635 if (!cache->data && mark_free) {
636 for (i = 0; i < num; i++) {
637 set_radix_bit(&info->extent_map_radix,
/* crossing below half-used: advertise the group as available again */
641 if (old_val < cache->key.offset / 2 &&
642 old_val + num >= cache->key.offset / 2) {
643 printk("group %Lu now available\n", cache->key.objectid);
644 radix_tree_tag_set(cache->radix,
645 cache->key.objectid +
646 cache->key.offset - 1,
647 BTRFS_BLOCK_GROUP_AVAIL);
650 btrfs_set_block_group_used(&cache->item, old_val);
/*
 * Best-effort drop of a single page from the btree inode's page cache
 * (invalidate_mapping_pages skips pages that are busy or dirty).
 */
657 static int try_remove_page(struct address_space *mapping, unsigned long index)
660 ret = invalidate_mapping_pages(mapping, index, index);
/*
 * After a transaction commits, release every block pinned in
 * pinned_radix: drop the pin count on its block group, pull the
 * last_alloc/last_prealloc hints back so the space is found again,
 * re-mark metadata blocks free in the extent map, and try to evict
 * the block's page from the btree inode's page cache.
 * NOTE(review): the loop-until-empty control flow is elided.
 */
664 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, struct
667 unsigned long gang[8];
668 struct inode *btree_inode = root->fs_info->btree_inode;
669 struct btrfs_block_group_cache *block_group;
673 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
674 struct radix_tree_root *extent_radix = &root->fs_info->extent_map_radix;
677 ret = find_first_radix_bit(pinned_radix, gang, 0,
683 for (i = 0; i < ret; i++) {
684 clear_radix_bit(pinned_radix, gang[i]);
685 block_group = lookup_block_group(root->fs_info,
688 WARN_ON(block_group->pinned == 0);
689 block_group->pinned--;
690 if (gang[i] < block_group->last_alloc)
691 block_group->last_alloc = gang[i];
692 if (gang[i] < block_group->last_prealloc)
693 block_group->last_prealloc = gang[i];
/* only metadata groups keep a free-extent bitmap */
694 if (!block_group->data)
695 set_radix_bit(extent_radix, gang[i]);
/* block number -> page index conversion for the cache eviction */
697 try_remove_page(btree_inode->i_mapping,
698 gang[i] << (PAGE_CACHE_SHIFT -
699 btree_inode->i_blkbits));
/*
 * Insert extent items for every block queued in
 * extent_tree_insert[] (blocks handed out for extent-tree nodes while
 * the tree itself was being modified), bumping the superblock
 * used-block count by one per item.  Both the insert queue and the
 * prealloc queue counters are reset at the end.
 */
705 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
706 btrfs_root *extent_root)
708 struct btrfs_key ins;
709 struct btrfs_extent_item extent_item;
712 u64 super_blocks_used;
713 struct btrfs_fs_info *info = extent_root->fs_info;
715 btrfs_set_extent_refs(&extent_item, 1);
718 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
/* extent tree blocks are owned by the extent root itself */
719 btrfs_set_extent_owner(&extent_item, extent_root->root_key.objectid);
721 for (i = 0; i < extent_root->fs_info->extent_tree_insert_nr; i++) {
722 ins.objectid = extent_root->fs_info->extent_tree_insert[i];
723 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
724 btrfs_set_super_blocks_used(info->disk_super,
725 super_blocks_used + 1);
726 ret = btrfs_insert_item(trans, extent_root, &ins, &extent_item,
727 sizeof(extent_item));
730 extent_root->fs_info->extent_tree_insert_nr = 0;
731 extent_root->fs_info->extent_tree_prealloc_nr = 0;
/*
 * Mark a block unavailable for reuse until the transaction commits.
 * If the block's buffer is up to date and belongs to the running
 * transaction's generation it appears to be releasable immediately
 * (branch partly elided).  Otherwise the block number is recorded in
 * pinned_radix (and the group's pinned count bumped) or, when
 * 'pending', in pending_del_radix for deferred freeing.
 * NOTE(review): the pending/non-pending branch structure is elided;
 * confirm which radix each path uses against the full original.
 */
735 static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
738 struct btrfs_header *header;
739 struct buffer_head *bh;
742 bh = btrfs_find_tree_block(root, blocknr);
744 if (buffer_uptodate(bh)) {
746 root->fs_info->running_transaction->transid;
747 header = btrfs_buffer_header(bh);
748 if (btrfs_header_generation(header) ==
750 btrfs_block_release(root, bh);
754 btrfs_block_release(root, bh);
756 err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
758 struct btrfs_block_group_cache *cache;
759 cache = lookup_block_group(root->fs_info, blocknr);
764 err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
/*
 * remove an extent from the root, returns 0 on success
 *
 * Drops one reference on the extent item for [blocknr, num_blocks).
 * When the count reaches zero (elided branch), the block is pinned if
 * requested, the superblock used count is reduced, the extent item is
 * deleted, and the block group accounting is updated.  The leading
 * find_free_extent(..., 0, ...) refills the extent-tree prealloc
 * before the search may COW extent tree nodes.
 */
773 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
774 *root, u64 blocknr, u64 num_blocks, int pin,
777 struct btrfs_path *path;
778 struct btrfs_key key;
779 struct btrfs_fs_info *info = root->fs_info;
780 struct btrfs_root *extent_root = info->extent_root;
782 struct btrfs_extent_item *ei;
783 struct btrfs_key ins;
786 key.objectid = blocknr;
788 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
789 key.offset = num_blocks;
791 find_free_extent(trans, root, 0, 0, (u64)-1, &ins, 0);
792 path = btrfs_alloc_path();
794 btrfs_init_path(path);
796 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
798 printk("failed to find %Lu\n", key.objectid);
799 btrfs_print_tree(extent_root, extent_root->node);
800 printk("failed to find %Lu\n", key.objectid);
803 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
804 struct btrfs_extent_item);
805 BUG_ON(ei->refs == 0);
806 refs = btrfs_extent_refs(ei) - 1;
807 btrfs_set_extent_refs(ei, refs);
808 btrfs_mark_buffer_dirty(path->nodes[0]);
/* refcount hit zero (elided test): actually reclaim the extent */
810 u64 super_blocks_used;
813 ret = pin_down_block(root, blocknr, 0);
817 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
818 btrfs_set_super_blocks_used(info->disk_super,
819 super_blocks_used - num_blocks);
820 ret = btrfs_del_item(trans, extent_root, path);
823 ret = update_block_group(trans, root, blocknr, num_blocks, 0,
827 btrfs_free_path(path);
828 finish_current_insert(trans, extent_root);
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 *
 * Each pending block is moved to the pinned radix (bumping the owning
 * group's pinned count when it was not already pinned), its pending
 * bit is cleared, and __free_extent() is invoked to drop the extent
 * item.  NOTE(review): the loop-until-empty control flow is elided.
 */
836 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
837 btrfs_root *extent_root)
842 unsigned long gang[4];
844 struct radix_tree_root *pending_radix;
845 struct radix_tree_root *pinned_radix;
846 struct btrfs_block_group_cache *cache;
848 pending_radix = &extent_root->fs_info->pending_del_radix;
849 pinned_radix = &extent_root->fs_info->pinned_radix;
852 ret = find_first_radix_bit(pending_radix, gang, 0,
856 for (i = 0; i < ret; i++) {
857 wret = set_radix_bit(pinned_radix, gang[i]);
859 cache = lookup_block_group(extent_root->fs_info,
865 printk(KERN_CRIT "set_radix_bit, err %d\n",
869 wret = clear_radix_bit(pending_radix, gang[i]);
871 wret = __free_extent(trans, extent_root,
/*
 * remove an extent from the root, returns 0 on success
 *
 * Public entry point.  Frees on the extent tree itself cannot recurse
 * into __free_extent(), so they are queued as pending deletions
 * instead; everything else frees immediately and then flushes the
 * pending queue.
 */
883 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
884 *root, u64 blocknr, u64 num_blocks, int pin)
886 struct btrfs_root *extent_root = root->fs_info->extent_root;
/* extent-tree blocks: defer via the pending_del radix */
890 if (root == extent_root) {
891 pin_down_block(root, blocknr, 1);
/* mark_free only when not pinning: pinned blocks free at commit */
894 ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
895 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
896 return ret ? ret : pending_ret;
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 *
 * num_blocks == 0 switches the call into fill_prealloc mode: instead
 * of returning a hole it refills info->extent_tree_prealloc[] with
 * enough single blocks for a worst-case extent-tree COW
 * ((levels + 2) * 3 blocks, see total_needed).  Candidate holes are
 * rejected if they overlap pinned blocks, the pending-insert queue, or
 * the existing preallocation; rejection restarts the scan, eventually
 * falling back to a full scan from the original search_start.
 * NOTE(review): heavily elided (goto labels, loop control, and error
 * exits are missing) -- comments annotate only the visible statements.
 */
907 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
908 *orig_root, u64 num_blocks, u64 search_start, u64
909 search_end, struct btrfs_key *ins, int data)
911 struct btrfs_path *path;
912 struct btrfs_key key;
918 u64 orig_search_start = search_start;
920 struct btrfs_leaf *l;
921 struct btrfs_root * root = orig_root->fs_info->extent_root;
922 struct btrfs_fs_info *info = root->fs_info;
923 int total_needed = num_blocks;
925 int fill_prealloc = 0;
927 struct btrfs_block_group_cache *block_group;
931 path = btrfs_alloc_path();
933 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
935 level = btrfs_header_level(btrfs_buffer_header(root->node));
/* prealloc mode: need enough blocks for a full-height COW, ~3 per level */
936 if (num_blocks == 0) {
939 total_needed = (min(level + 1, BTRFS_MAX_LEVEL) + 2) * 3;
941 if (search_end == (u64)-1)
942 search_end = btrfs_super_total_blocks(info->disk_super);
944 block_group = lookup_block_group(info, search_start);
945 block_group = btrfs_find_block_group(root, block_group,
946 search_start, data, 1);
948 block_group = btrfs_find_block_group(root,
949 trans->block_group, 0,
/* metadata groups have a cached free map; use it to pick a start */
954 if (!block_group->data)
955 search_start = find_search_start(root, &block_group,
956 search_start, total_needed)
958 search_start = max(block_group->last_alloc, search_start);
960 btrfs_init_path(path);
961 ins->objectid = search_start;
965 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
969 if (path->slots[0] > 0) {
973 l = btrfs_buffer_leaf(path->nodes[0]);
974 btrfs_disk_key_to_cpu(&key, &l->items[path->slots[0]].key);
/*
 * a rare case, go back one key if we hit a block group item
 * instead of an extent item
 */
979 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
980 key.objectid + key.offset >= search_start) {
981 ins->objectid = key.objectid;
982 ins->offset = key.offset - 1;
983 btrfs_release_path(root, path);
984 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
988 if (path->slots[0] > 0) {
994 l = btrfs_buffer_leaf(path->nodes[0]);
995 slot = path->slots[0];
/* walked off the leaf: advance, or conclude the tail is all free */
996 if (slot >= btrfs_header_nritems(&l->header)) {
998 info->extent_tree_prealloc_nr = 0;
1002 limit = last_block +
1003 block_group->key.offset / 2;
1005 limit = search_start +
1006 block_group->key.offset / 2;
1007 ret = btrfs_next_leaf(root, path);
/* no more items: everything up to search_end is a candidate hole */
1013 ins->objectid = search_start;
1014 ins->offset = search_end - search_start;
1018 ins->objectid = last_block > search_start ?
1019 last_block : search_start;
1020 ins->offset = search_end - ins->objectid;
1024 btrfs_disk_key_to_cpu(&key, &l->items[slot].key);
/* gap between previous extent end and this key: big enough? */
1025 if (key.objectid >= search_start && key.objectid > last_block &&
1027 if (last_block < search_start)
1028 last_block = search_start;
1029 hole_size = key.objectid - last_block;
1030 if (hole_size >= num_blocks) {
1031 ins->objectid = last_block;
1032 ins->offset = hole_size;
/* non-extent items (e.g. block group items) don't advance last_block */
1037 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1041 last_block = key.objectid + key.offset;
/* scanned past this block group: jump the search to a later region */
1042 if (last_block >= block_group->key.objectid +
1043 block_group->key.offset) {
1044 btrfs_release_path(root, path);
1045 search_start = block_group->key.objectid +
1046 block_group->key.offset * 2;
/* we have to make sure we didn't find an extent that has already
 * been allocated by the map tree or the original allocation
 */
1058 btrfs_release_path(root, path);
1059 BUG_ON(ins->objectid < search_start);
1061 if (ins->objectid + num_blocks >= search_end) {
1064 search_start = orig_search_start;
/* reject holes overlapping pinned blocks */
1068 for (test_block = ins->objectid;
1069 test_block < ins->objectid + num_blocks; test_block++) {
1070 if (test_radix_bit(&info->pinned_radix, test_block)) {
1071 search_start = test_block + 1;
/* reject holes overlapping the queued-but-uninserted extent items */
1075 if (!fill_prealloc && info->extent_tree_insert_nr) {
1077 info->extent_tree_insert[info->extent_tree_insert_nr - 1];
1078 if (ins->objectid + num_blocks >
1079 info->extent_tree_insert[0] &&
1080 ins->objectid <= last) {
1081 search_start = last + 1;
1082 WARN_ON(!full_scan);
/* reject holes overlapping the existing preallocation */
1086 if (!fill_prealloc && info->extent_tree_prealloc_nr) {
1088 info->extent_tree_prealloc[info->extent_tree_prealloc_nr - 1];
1089 if (ins->objectid + num_blocks > first &&
1090 ins->objectid <= info->extent_tree_prealloc[0]) {
1091 search_start = info->extent_tree_prealloc[0] + 1;
1092 WARN_ON(!full_scan);
/* prealloc mode: harvest single blocks out of the hole just found */
1096 if (fill_prealloc) {
1098 test_block = ins->objectid;
1099 if (test_block - info->extent_tree_prealloc[total_needed - 1] >=
1102 info->extent_tree_prealloc_nr = total_found;
/* prealloc array is filled back-to-front */
1104 while(test_block < ins->objectid + ins->offset &&
1105 total_found < total_needed) {
1106 nr = total_needed - total_found - 1;
1108 info->extent_tree_prealloc[nr] = test_block;
1112 if (total_found < total_needed) {
1113 search_start = test_block;
1116 info->extent_tree_prealloc_nr = total_found;
1119 block_group = lookup_block_group(info, ins->objectid);
1122 block_group->last_prealloc =
1123 info->extent_tree_prealloc[total_needed-1];
/* remember where this transaction allocated last, as a future hint */
1125 trans->block_group = block_group;
1128 ins->offset = num_blocks;
1129 btrfs_free_path(path);
/* wrapped past search_end: restart from the beginning, full scan */
1133 if (search_start + num_blocks >= search_end) {
1134 search_start = orig_search_start;
1135 printk("doing full scan!\n");
1138 block_group = lookup_block_group(info, search_start);
1140 block_group = btrfs_find_block_group(root, block_group,
1141 search_start, data, 0);
1146 btrfs_release_path(root, path);
1147 btrfs_free_path(path);
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 *
 * Allocations for the extent tree itself are served straight from the
 * preallocation array (single blocks only) and queued for later
 * insertion via finish_current_insert().  All other allocations run
 * find_free_extent() -- preceded (data) or followed (metadata) by a
 * prealloc refill so extent-tree blocks land near the allocation --
 * then insert the extent item and update the block group accounting.
 */
1157 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1158 struct btrfs_root *root, u64 owner,
1159 u64 num_blocks, u64 search_start,
1160 u64 search_end, struct btrfs_key *ins, int data)
1164 u64 super_blocks_used;
1165 struct btrfs_fs_info *info = root->fs_info;
1166 struct btrfs_root *extent_root = info->extent_root;
1167 struct btrfs_extent_item extent_item;
1168 struct btrfs_key prealloc_key;
1170 btrfs_set_extent_refs(&extent_item, 1);
1171 btrfs_set_extent_owner(&extent_item, owner);
/* extent tree allocation: pop one block off the prealloc stack */
1173 if (root == extent_root) {
1175 BUG_ON(info->extent_tree_prealloc_nr == 0);
1176 BUG_ON(num_blocks != 1);
1178 info->extent_tree_prealloc_nr--;
1179 nr = info->extent_tree_prealloc_nr;
1180 ins->objectid = info->extent_tree_prealloc[nr];
/* queue the extent item; inserting now would recurse */
1181 info->extent_tree_insert[info->extent_tree_insert_nr++] =
1183 ret = update_block_group(trans, root,
1184 ins->objectid, ins->offset, 1, 0, 0);
/*
 * if we're doing a data allocation, preallocate room in the
 * extent tree first. This way the extent tree blocks end up
 * in the correct block group.
 */
1195 ret = find_free_extent(trans, root, 0, 0,
1196 search_end, &prealloc_key, 0);
/* keep the real allocation clear of the prealloc region */
1200 if (prealloc_key.objectid + prealloc_key.offset >= search_end) {
1201 int nr = info->extent_tree_prealloc_nr;
1202 search_end = info->extent_tree_prealloc[nr - 1] - 1;
1204 search_start = info->extent_tree_prealloc[0] + 1;
/* do the real allocation */
1208 ret = find_free_extent(trans, root, num_blocks, search_start,
1209 search_end, ins, data);
/*
 * if we're doing a metadata allocation, preallocate space in the
 * extent tree second. This way, we don't create a tiny hole
 * in the allocation map between any unused preallocation blocks
 * and the metadata block we're actually allocating. On disk,
 * [block we've allocated], [used prealloc 1], [ unused prealloc ]
 * The unused prealloc will get reused the next time around.
 */
1224 if (ins->objectid + ins->offset >= search_end)
1225 search_end = ins->objectid - 1;
1227 search_start = ins->objectid + ins->offset;
1229 ret = find_free_extent(trans, root, 0, search_start,
1230 search_end, &prealloc_key, 0);
1236 super_blocks_used = btrfs_super_blocks_used(info->disk_super);
1237 btrfs_set_super_blocks_used(info->disk_super, super_blocks_used +
1239 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
1240 sizeof(extent_item));
1242 finish_current_insert(trans, extent_root);
1243 pending_ret = del_pending_extents(trans, extent_root);
1250 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 *
 * Allocates one metadata block near 'hint', owned by this root, then
 * materializes its buffer_head, marks it uptodate/checked, and records
 * its page as dirty in the transaction so it gets written at commit.
 */
1259 struct buffer_head *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1260 struct btrfs_root *root, u64 hint)
1262 struct btrfs_key ins;
1264 struct buffer_head *buf;
1266 ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
1267 1, hint, (unsigned long)-1, &ins, 0);
1273 buf = btrfs_find_create_tree_block(root, ins.objectid);
1274 set_buffer_uptodate(buf);
1275 set_buffer_checked(buf);
1276 set_radix_bit(&trans->transaction->dirty_pages, buf->b_page->index);
/*
 * Drop one reference on every on-disk file extent referenced by the
 * leaf 'cur'.  Inline extents and blocknr-0 holes are skipped.  Used
 * by the snapshot-deletion tree walk.
 */
1280 static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1281 struct btrfs_root *root, struct buffer_head *cur)
1283 struct btrfs_disk_key *key;
1284 struct btrfs_leaf *leaf;
1285 struct btrfs_file_extent_item *fi;
1290 BUG_ON(!btrfs_is_leaf(btrfs_buffer_node(cur)));
1291 leaf = btrfs_buffer_leaf(cur);
1292 nritems = btrfs_header_nritems(&leaf->header);
1293 for (i = 0; i < nritems; i++) {
1295 key = &leaf->items[i].key;
1296 if (btrfs_disk_key_type(key) != BTRFS_EXTENT_DATA_KEY)
1298 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1299 if (btrfs_file_extent_type(fi) == BTRFS_FILE_EXTENT_INLINE)
/*
 * FIXME make sure to insert a trans record that
 * repeats the snapshot del on crash
 */
1305 disk_blocknr = btrfs_file_extent_disk_blocknr(fi);
1306 if (disk_blocknr == 0)
1308 ret = btrfs_free_extent(trans, root, disk_blocknr,
1309 btrfs_file_extent_disk_num_blocks(fi),
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 *
 * Descends from path->nodes[*level]: children with extra refs just get
 * one ref dropped (slot advances); children with a single ref are
 * descended into, leaves have their file extents released via
 * drop_leaf_ref().  On the way out the current node's own extent is
 * freed and its path slot cleared.
 * NOTE(review): several branch/return lines are elided; the exact exit
 * conditions of the while loop are not visible here.
 */
1320 static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1321 *root, struct btrfs_path *path, int *level)
1323 struct buffer_head *next;
1324 struct buffer_head *cur;
1329 WARN_ON(*level < 0);
1330 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1331 ret = lookup_extent_ref(trans, root, bh_blocknr(path->nodes[*level]),
/*
 * walk down to the last node level and free all the leaves
 */
1339 while(*level >= 0) {
1340 WARN_ON(*level < 0);
1341 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1342 cur = path->nodes[*level];
1343 if (btrfs_header_level(btrfs_buffer_header(cur)) != *level)
1345 if (path->slots[*level] >=
1346 btrfs_header_nritems(btrfs_buffer_header(cur)))
1349 ret = drop_leaf_ref(trans, root, cur);
1353 blocknr = btrfs_node_blockptr(btrfs_buffer_node(cur),
1354 path->slots[*level]);
1355 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
/* shared child: drop our ref and move on without descending */
1358 path->slots[*level]++;
1359 ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
/* sole owner: descend into the child */
1363 next = read_tree_block(root, blocknr);
1364 WARN_ON(*level <= 0);
1365 if (path->nodes[*level-1])
1366 btrfs_block_release(root, path->nodes[*level-1]);
1367 path->nodes[*level-1] = next;
1368 *level = btrfs_header_level(btrfs_buffer_header(next));
1369 path->slots[*level] = 0;
1372 WARN_ON(*level < 0);
1373 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1374 ret = btrfs_free_extent(trans, root,
1375 bh_blocknr(path->nodes[*level]), 1, 1);
1376 btrfs_block_release(root, path->nodes[*level]);
1377 path->nodes[*level] = NULL;
/*
 * helper for dropping snapshots. This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 *
 * Levels whose slot is not yet at the last item are resumed (elided
 * branch); fully-consumed levels have their block's extent freed and
 * their path entry released/cleared.
 */
1388 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1389 *root, struct btrfs_path *path, int *level)
1394 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1395 slot = path->slots[i];
1396 if (slot < btrfs_header_nritems(
1397 btrfs_buffer_header(path->nodes[i])) - 1) {
1402 ret = btrfs_free_extent(trans, root,
1403 bh_blocknr(path->nodes[*level]),
1406 btrfs_block_release(root, path->nodes[*level]);
1407 path->nodes[*level] = NULL;
/*
 * drop the reference count on the tree rooted at 'snap'. This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 *
 * Alternates walk_down_tree()/walk_up_tree() from the snapshot root
 * until the walk completes, balancing dirty btree pages between
 * rounds, then releases any path nodes still held.
 * NOTE(review): the loop control between the two walks is elided.
 */
1419 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1420 *root, struct buffer_head *snap)
1425 struct btrfs_path *path;
1429 path = btrfs_alloc_path();
1431 btrfs_init_path(path);
1433 level = btrfs_header_level(btrfs_buffer_header(snap));
/* seed the path with the snapshot root as the starting node */
1435 path->nodes[level] = snap;
1436 path->slots[level] = 0;
1438 wret = walk_down_tree(trans, root, path, &level);
1444 wret = walk_up_tree(trans, root, path, &level);
1449 btrfs_btree_balance_dirty(root);
1451 for (i = 0; i <= orig_level; i++) {
1452 if (path->nodes[i]) {
1453 btrfs_block_release(root, path->nodes[i]);
1456 btrfs_free_path(path);
/*
 * Delete every block group cache entry from 'radix' (entries are
 * indexed by their last block).  The kfree of each entry is elided
 * from this view but presumably follows the delete -- confirm.
 * NOTE(review): the loop-until-empty control flow is elided.
 */
1460 static int free_block_group_radix(struct radix_tree_root *radix)
1463 struct btrfs_block_group_cache *cache[8];
1467 ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
1471 for (i = 0; i < ret; i++) {
1472 radix_tree_delete(radix, cache[i]->key.objectid +
1473 cache[i]->key.offset - 1);
/*
 * Tear down all block group state at unmount: empty both block group
 * radixes and clear every bit left in the free-extent map.
 * NOTE(review): the loop-until-empty control flow is elided.
 */
1480 int btrfs_free_block_groups(struct btrfs_fs_info *info)
1484 unsigned long gang[16];
1487 ret = free_block_group_radix(&info->block_group_radix);
1488 ret2 = free_block_group_radix(&info->block_group_data_radix);
1495 ret = find_first_radix_bit(&info->extent_map_radix,
1496 gang, 0, ARRAY_SIZE(gang));
1499 for (i = 0; i < ret; i++) {
1500 clear_radix_bit(&info->extent_map_radix, gang[i]);
1506 int btrfs_read_block_groups(struct btrfs_root *root)
1508 struct btrfs_path *path;
1511 struct btrfs_block_group_item *bi;
1512 struct btrfs_block_group_cache *cache;
1513 struct btrfs_fs_info *info = root->fs_info;
1514 struct radix_tree_root *radix;
1515 struct btrfs_key key;
1516 struct btrfs_key found_key;
1517 struct btrfs_leaf *leaf;
1518 u64 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE / root->blocksize;
1521 root = info->extent_root;
1523 key.offset = group_size_blocks;
1525 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1527 path = btrfs_alloc_path();
1532 ret = btrfs_search_slot(NULL, info->extent_root,
1538 leaf = btrfs_buffer_leaf(path->nodes[0]);
1539 btrfs_disk_key_to_cpu(&found_key,
1540 &leaf->items[path->slots[0]].key);
1541 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1547 bi = btrfs_item_ptr(leaf, path->slots[0],
1548 struct btrfs_block_group_item);
1549 if (bi->flags & BTRFS_BLOCK_GROUP_DATA) {
1550 radix = &info->block_group_data_radix;
1553 radix = &info->block_group_radix;
1557 memcpy(&cache->item, bi, sizeof(*bi));
1558 memcpy(&cache->key, &found_key, sizeof(found_key));
1559 cache->last_alloc = cache->key.objectid;
1560 cache->first_free = cache->key.objectid;
1561 cache->last_prealloc = cache->key.objectid;
1565 cache->radix = radix;
1567 key.objectid = found_key.objectid + found_key.offset;
1568 btrfs_release_path(root, path);
1569 ret = radix_tree_insert(radix, found_key.objectid +
1570 found_key.offset - 1,
1573 used = btrfs_block_group_used(bi);
1574 if (used < (key.offset * 8) / 10) {
1575 radix_tree_tag_set(radix, found_key.objectid +
1576 found_key.offset - 1,
1577 BTRFS_BLOCK_GROUP_AVAIL);
1580 btrfs_super_total_blocks(info->disk_super))
1584 btrfs_free_path(path);