2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/crc32c.h>
24 #include "print-tree.h"
25 #include "transaction.h"
27 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
28 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
29 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
31 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
32 btrfs_root *extent_root);
33 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
34 btrfs_root *extent_root);
/*
 * Populate the per-fs free_space_cache with the free ranges inside
 * @block_group: scan the EXTENT_ITEMs in the extent tree and mark every
 * gap between allocated extents (and the tail of the group) as
 * EXTENT_DIRTY.  Returns early if block_group->cached is already set;
 * sets it on completion.
 * NOTE(review): this excerpt elides lines of the original; comments
 * describe only the statements visible here.
 */
36 static int cache_block_group(struct btrfs_root *root,
37 struct btrfs_block_group_cache *block_group)
39 struct btrfs_path *path;
42 struct extent_buffer *leaf;
43 struct extent_map_tree *free_space_cache;
/* block groups live in the extent root's tree */
53 root = root->fs_info->extent_root;
54 free_space_cache = &root->fs_info->free_space_cache;
/* nothing to do if a previous call already filled the cache */
56 if (block_group->cached)
59 path = btrfs_alloc_path();
64 first_free = block_group->key.objectid;
65 key.objectid = block_group->key.objectid;
68 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
69 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
74 if (ret && path->slots[0] > 0)
78 leaf = path->nodes[0];
79 slot = path->slots[0];
/* stepped off the end of this leaf: advance to the next one */
80 if (slot >= btrfs_header_nritems(leaf)) {
81 ret = btrfs_next_leaf(root, path);
91 btrfs_item_key_to_cpu(leaf, &key, slot);
/* items before the group's start may still push first_free forward */
92 if (key.objectid < block_group->key.objectid) {
93 if (btrfs_key_type(&key) != BTRFS_EXTENT_REF_KEY &&
94 key.objectid + key.offset > first_free)
95 first_free = key.objectid + key.offset;
/* past the end of this block group: stop scanning */
99 if (key.objectid >= block_group->key.objectid +
100 block_group->key.offset) {
104 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
/* the gap between the previous extent and this one is free space */
109 if (key.objectid > last) {
110 hole_size = key.objectid - last;
111 set_extent_dirty(free_space_cache, last,
112 last + hole_size - 1,
115 last = key.objectid + key.offset;
/* trailing free space between the last extent and the group end */
123 if (block_group->key.objectid +
124 block_group->key.offset > last) {
125 hole_size = block_group->key.objectid +
126 block_group->key.offset - last;
127 set_extent_dirty(free_space_cache, last,
128 last + hole_size - 1, GFP_NOFS);
130 block_group->cached = 1;
132 btrfs_free_path(path);
/*
 * Return the block group cache struct containing @bytenr, or NULL.
 * Block groups are tracked as ranges in fs_info->block_group_cache with
 * the struct pointer stashed as per-range private data; searching both
 * DATA and METADATA bits accepts any group type.
 */
136 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
140 struct extent_map_tree *block_group_cache;
141 struct btrfs_block_group_cache *block_group = NULL;
147 block_group_cache = &info->block_group_cache;
148 ret = find_first_extent_bit(block_group_cache,
149 bytenr, &start, &end,
150 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA);
154 ret = get_state_private(block_group_cache, start, &ptr);
/* the private value is the block group pointer, stored as a u64 */
158 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
/* only return the group if bytenr really falls inside its range */
159 if (block_group->key.objectid <= bytenr && bytenr <
160 block_group->key.objectid + block_group->key.offset)
/*
 * Find a free-space start of at least @num bytes at or after
 * @search_start, consulting free_space_cache (EXTENT_DIRTY == free).
 * Advances through later block groups when the current one is
 * exhausted and may update *cache_ret to the group finally used.
 * NOTE(review): excerpt elides lines; return path not visible here.
 */
164 static u64 find_search_start(struct btrfs_root *root,
165 struct btrfs_block_group_cache **cache_ret,
166 u64 search_start, int num,
167 int data, int full_scan)
170 struct btrfs_block_group_cache *cache = *cache_ret;
/* make sure this group's free space is recorded before searching it */
181 ret = cache_block_group(root, cache);
185 last = max(search_start, cache->key.objectid);
188 ret = find_first_extent_bit(&root->fs_info->free_space_cache,
189 last, &start, &end, EXTENT_DIRTY);
196 start = max(last, start);
198 if (last - start < num) {
/* reached the exact end of this block group */
199 if (last == cache->key.objectid + cache->key.offset)
/* unless MIXED, an allocation may not straddle the group boundary */
203 if (data != BTRFS_BLOCK_GROUP_MIXED &&
204 start + num > cache->key.objectid + cache->key.offset)
209 cache = btrfs_lookup_block_group(root->fs_info, search_start);
211 printk("Unable to find block group for %Lu\n",
/* move on to the block group that follows the current one */
219 last = cache->key.objectid + cache->key.offset;
221 cache = btrfs_lookup_block_group(root->fs_info, last);
227 data = BTRFS_BLOCK_GROUP_MIXED;
/* we missed an uncached group earlier: fill its cache and retry */
232 if (cache_miss && !cache->cached) {
233 cache_block_group(root, cache);
235 cache = btrfs_lookup_block_group(root->fs_info, last);
237 cache = btrfs_find_block_group(root, cache, last, data, 0);
/*
 * Scale @num by @factor.  NOTE(review): the body is elided in this
 * view; callers treat the result as a "fullness" threshold derived
 * from a block group's size — confirm the exact formula in the source.
 */
245 static u64 div_factor(u64 num, int factor)
/*
 * Choose a block group to allocate from.  Preference order: the group
 * containing @search_start, then the caller's @hint, then a scan of
 * block_group_cache for a group whose used + pinned bytes are below a
 * div_factor() threshold.  @data selects which BLOCK_GROUP_* bits are
 * searched; MIXED matches both kinds.
 */
254 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
255 struct btrfs_block_group_cache
256 *hint, u64 search_start,
259 struct btrfs_block_group_cache *cache;
260 struct extent_map_tree *block_group_cache;
261 struct btrfs_block_group_cache *found_group = NULL;
262 struct btrfs_fs_info *info = root->fs_info;
276 block_group_cache = &info->block_group_cache;
/* map the requested data type onto the extent-bit encoding */
281 if (data == BTRFS_BLOCK_GROUP_MIXED) {
282 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
285 bit = BLOCK_GROUP_DATA;
287 bit = BLOCK_GROUP_METADATA;
/* fast path: the group containing search_start, if roomy enough */
290 struct btrfs_block_group_cache *shint;
291 shint = btrfs_lookup_block_group(info, search_start);
292 if (shint && (shint->data == data ||
293 shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
294 used = btrfs_block_group_used(&shint->item);
295 if (used + shint->pinned <
296 div_factor(shint->key.offset, factor)) {
/* second choice: the caller-provided hint group */
301 if (hint && (hint->data == data ||
302 hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
303 used = btrfs_block_group_used(&hint->item);
304 if (used + hint->pinned <
305 div_factor(hint->key.offset, factor)) {
308 last = hint->key.objectid + hint->key.offset;
312 hint_last = max(hint->key.objectid, search_start);
314 hint_last = search_start;
/* scan: walk the block group cache from 'last' onwards */
320 ret = find_first_extent_bit(block_group_cache, last,
325 ret = get_state_private(block_group_cache, start, &ptr);
329 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
330 last = cache->key.objectid + cache->key.offset;
331 used = btrfs_block_group_used(&cache->item);
334 free_check = cache->key.offset;
336 free_check = div_factor(cache->key.offset, factor);
337 if (used + cache->pinned < free_check) {
/* widen the search to both group types before giving up */
350 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
/*
 * Hash the identifying fields of an extent backref into the 64 bit
 * value used as the EXTENT_REF_KEY item offset.  root_objectid feeds
 * the high 32 bits, the remaining three fields the low 32 bits, each
 * folded in with crc32c over the little-endian representation so the
 * hash is stable across host byte orders.
 */
358 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
359 u64 owner, u64 owner_offset)
361 u32 high_crc = ~(u32)0;
362 u32 low_crc = ~(u32)0;
365 lenum = cpu_to_le64(root_objectid);
366 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
367 lenum = cpu_to_le64(ref_generation);
368 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
371 lenum = cpu_to_le64(owner);
372 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
373 lenum = cpu_to_le64(owner_offset);
374 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
376 return ((u64)high_crc << 32) | (u64)low_crc;
/*
 * Compare an on-disk extent ref against a cpu-order candidate.  When
 * the candidate's objectid is zero only the first two u64 fields (root
 * and generation) are compared, so a caller can match any owner.
 */
379 static int match_extent_ref(struct extent_buffer *leaf,
380 struct btrfs_extent_ref *disk_ref,
381 struct btrfs_extent_ref *cpu_ref)
386 if (cpu_ref->objectid)
387 len = sizeof(*cpu_ref);
389 len = 2 * sizeof(u64);
390 ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
/*
 * Find the EXTENT_REF_KEY item recording the backref
 * (root_objectid, ref_generation, owner, owner_offset) -> @bytenr.
 * The item offset is the hash_extent_ref() value; on a hash collision
 * the search retries at successive offsets until match_extent_ref()
 * succeeds or the refs for this extent run out.
 * NOTE(review): how @del is used is not visible in this excerpt —
 * presumably it is the cow/delete flag for btrfs_search_slot; confirm.
 */
395 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
396 struct btrfs_root *root,
397 struct btrfs_path *path, u64 bytenr,
398 u64 root_objectid, u64 ref_generation,
399 u64 owner, u64 owner_offset, int del)
402 struct btrfs_key key;
403 struct btrfs_key found_key;
404 struct btrfs_extent_ref ref;
405 struct extent_buffer *leaf;
406 struct btrfs_extent_ref *disk_ref;
/* build the cpu-order ref we expect to find on disk */
410 btrfs_set_stack_ref_root(&ref, root_objectid);
411 btrfs_set_stack_ref_generation(&ref, ref_generation);
412 btrfs_set_stack_ref_objectid(&ref, owner);
413 btrfs_set_stack_ref_offset(&ref, owner_offset);
415 hash = hash_extent_ref(root_objectid, ref_generation, owner,
418 key.objectid = bytenr;
419 key.type = BTRFS_EXTENT_REF_KEY;
422 ret = btrfs_search_slot(trans, root, &key, path,
426 leaf = path->nodes[0];
428 u32 nritems = btrfs_header_nritems(leaf);
429 if (path->slots[0] >= nritems) {
430 ret2 = btrfs_next_leaf(root, path);
433 leaf = path->nodes[0];
435 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
/* ran past the refs belonging to this extent: not found */
436 if (found_key.objectid != bytenr ||
437 found_key.type != BTRFS_EXTENT_REF_KEY)
439 key.offset = found_key.offset;
441 btrfs_release_path(root, path);
445 disk_ref = btrfs_item_ptr(path->nodes[0],
447 struct btrfs_extent_ref);
448 if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
/* collision: retry just past the offset we found */
452 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
453 key.offset = found_key.offset + 1;
454 btrfs_release_path(root, path);
/*
 * Insert an EXTENT_REF_KEY item recording a backref from
 * (root_objectid, ref_generation, owner, owner_offset) to @bytenr.
 * The item offset starts at hash_extent_ref(); when the insert hits
 * -EEXIST, either the identical ref already exists (done) or the slot
 * was a hash collision, in which case the insert retries at the next
 * offset.  The ref struct is copied into the new item and the leaf is
 * marked dirty.
 */
460 int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
461 struct btrfs_root *root,
462 struct btrfs_path *path, u64 bytenr,
463 u64 root_objectid, u64 ref_generation,
464 u64 owner, u64 owner_offset)
467 struct btrfs_key key;
468 struct btrfs_extent_ref ref;
469 struct btrfs_extent_ref *disk_ref;
/* build the cpu-order ref to be written */
472 btrfs_set_stack_ref_root(&ref, root_objectid);
473 btrfs_set_stack_ref_generation(&ref, ref_generation);
474 btrfs_set_stack_ref_objectid(&ref, owner);
475 btrfs_set_stack_ref_offset(&ref, owner_offset);
477 hash = hash_extent_ref(root_objectid, ref_generation, owner,
480 key.objectid = bytenr;
481 key.type = BTRFS_EXTENT_REF_KEY;
483 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
/* -EEXIST: either the same ref is present or the hash slot collided */
484 while (ret == -EEXIST) {
485 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
486 struct btrfs_extent_ref);
487 if (match_extent_ref(path->nodes[0], disk_ref, &ref))
490 btrfs_release_path(root, path);
491 ret = btrfs_insert_empty_item(trans, root, path, &key,
/* copy the ref payload into the newly reserved item */
496 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
497 struct btrfs_extent_ref);
498 write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
500 btrfs_mark_buffer_dirty(path->nodes[0]);
502 btrfs_release_path(root, path);
/*
 * Increment the reference count of the EXTENT_ITEM covering
 * [bytenr, bytenr + num_bytes) and insert a matching backref, then
 * flush any pending extent-tree inserts and deletes that the item
 * manipulation may have queued.
 */
506 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
507 struct btrfs_root *root,
508 u64 bytenr, u64 num_bytes,
509 u64 root_objectid, u64 ref_generation,
510 u64 owner, u64 owner_offset)
512 struct btrfs_path *path;
514 struct btrfs_key key;
515 struct extent_buffer *l;
516 struct btrfs_extent_item *item;
519 WARN_ON(num_bytes < root->sectorsize);
520 path = btrfs_alloc_path();
524 key.objectid = bytenr;
525 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
526 key.offset = num_bytes;
527 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
/* bump the on-disk ref count in place */
536 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
537 refs = btrfs_extent_refs(l, item);
538 btrfs_set_extent_refs(l, item, refs + 1);
539 btrfs_mark_buffer_dirty(path->nodes[0]);
541 btrfs_release_path(root->fs_info->extent_root, path);
/* record who now holds the extra reference */
543 ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
544 path, bytenr, root_objectid,
545 ref_generation, owner, owner_offset);
547 finish_current_insert(trans, root->fs_info->extent_root);
548 del_pending_extents(trans, root->fs_info->extent_root);
550 btrfs_free_path(path);
/*
 * Flush deferred extent-tree work: apply queued extent-item inserts
 * and process pending deletions for the extent root.
 */
554 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
555 struct btrfs_root *root)
557 finish_current_insert(trans, root->fs_info->extent_root);
558 del_pending_extents(trans, root->fs_info->extent_root);
/*
 * Read the current reference count of the EXTENT_ITEM for
 * [bytenr, bytenr + num_bytes) into *refs.  On a failed lookup the
 * leaf is dumped and a diagnostic is printed.
 */
562 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
563 struct btrfs_root *root, u64 bytenr,
564 u64 num_bytes, u32 *refs)
566 struct btrfs_path *path;
568 struct btrfs_key key;
569 struct extent_buffer *l;
570 struct btrfs_extent_item *item;
572 WARN_ON(num_bytes < root->sectorsize);
573 path = btrfs_alloc_path();
574 key.objectid = bytenr;
575 key.offset = num_bytes;
576 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
577 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
/* extent item missing: dump state for debugging */
582 btrfs_print_leaf(root, path->nodes[0]);
583 printk("failed to find block number %Lu\n", bytenr);
587 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
588 *refs = btrfs_extent_refs(l, item);
590 btrfs_free_path(path);
/*
 * Take an extra extent reference on the root node of @root, owned by
 * @owner_objectid and stamped with the current transaction id.  The
 * first key of the root node/leaf is read here.
 * NOTE(review): how key_objectid feeds into the elided lines is not
 * visible in this excerpt — confirm against the full source.
 */
594 int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
595 struct btrfs_root *root, u64 owner_objectid)
601 struct btrfs_disk_key disk_key;
603 level = btrfs_header_level(root->node);
604 generation = trans->transid;
605 nritems = btrfs_header_nritems(root->node);
/* leaves use item keys, interior nodes use node keys */
608 btrfs_item_key(root->node, &disk_key, 0);
610 btrfs_node_key(root->node, &disk_key, 0);
611 key_objectid = btrfs_disk_key_objectid(&disk_key);
615 return btrfs_inc_extent_ref(trans, root, root->node->start,
616 root->node->len, owner_objectid,
/*
 * Add one extent reference for everything @buf points at: the disk
 * ranges of its file extent items when @buf is a leaf, or its child
 * blocks when it is an interior node.  If a ref fails partway, the
 * second loop (up to 'faili') undoes the references already taken by
 * freeing the same ranges.
 */
620 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
621 struct extent_buffer *buf)
625 struct btrfs_key key;
626 struct btrfs_file_extent_item *fi;
635 level = btrfs_header_level(buf);
636 nritems = btrfs_header_nritems(buf);
637 for (i = 0; i < nritems; i++) {
640 btrfs_item_key_to_cpu(buf, &key, i);
/* leaves: only regular file extents carry disk references */
641 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
643 fi = btrfs_item_ptr(buf, i,
644 struct btrfs_file_extent_item);
/* inline extents live in the leaf itself: nothing to reference */
645 if (btrfs_file_extent_type(buf, fi) ==
646 BTRFS_FILE_EXTENT_INLINE)
/* bytenr 0 marks a hole: nothing allocated */
648 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
649 if (disk_bytenr == 0)
651 ret = btrfs_inc_extent_ref(trans, root, disk_bytenr,
652 btrfs_file_extent_disk_num_bytes(buf, fi),
653 root->root_key.objectid, trans->transid,
654 key.objectid, key.offset);
/* interior node: reference each child block */
660 bytenr = btrfs_node_blockptr(buf, i);
661 ret = btrfs_inc_extent_ref(trans, root, bytenr,
662 btrfs_level_size(root, level - 1),
663 root->root_key.objectid,
664 trans->transid, 0, 0);
/* failure path: roll back the refs taken before slot 'faili' */
675 for (i =0; i < faili; i++) {
678 btrfs_item_key_to_cpu(buf, &key, i);
679 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
681 fi = btrfs_item_ptr(buf, i,
682 struct btrfs_file_extent_item);
683 if (btrfs_file_extent_type(buf, fi) ==
684 BTRFS_FILE_EXTENT_INLINE)
686 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
687 if (disk_bytenr == 0)
689 err = btrfs_free_extent(trans, root, disk_bytenr,
690 btrfs_file_extent_disk_num_bytes(buf,
694 bytenr = btrfs_node_blockptr(buf, i);
695 err = btrfs_free_extent(trans, root, bytenr,
696 btrfs_level_size(root, level - 1), 0);
/*
 * Write the in-memory block group item @cache back into the extent
 * tree: locate its item (cow'd for writing), overwrite it with
 * cache->item, dirty the leaf, and flush any pending extent-tree work
 * the update generated.
 */
704 static int write_one_cache_group(struct btrfs_trans_handle *trans,
705 struct btrfs_root *root,
706 struct btrfs_path *path,
707 struct btrfs_block_group_cache *cache)
711 struct btrfs_root *extent_root = root->fs_info->extent_root;
713 struct extent_buffer *leaf;
715 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
720 leaf = path->nodes[0];
721 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
722 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
723 btrfs_mark_buffer_dirty(leaf);
724 btrfs_release_path(extent_root, path);
726 finish_current_insert(trans, extent_root);
727 pending_ret = del_pending_extents(trans, extent_root);
/*
 * Write every block group marked BLOCK_GROUP_DIRTY in
 * block_group_cache back to disk via write_one_cache_group(), clearing
 * the dirty bit afterwards.  Per the in-code comment, a failed write
 * intentionally leaves the group dirty so a later pass can retry.
 */
736 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
737 struct btrfs_root *root)
739 struct extent_map_tree *block_group_cache;
740 struct btrfs_block_group_cache *cache;
744 struct btrfs_path *path;
750 block_group_cache = &root->fs_info->block_group_cache;
751 path = btrfs_alloc_path();
/* walk all ranges still flagged dirty, starting from 'last' */
756 ret = find_first_extent_bit(block_group_cache, last,
757 &start, &end, BLOCK_GROUP_DIRTY);
762 ret = get_state_private(block_group_cache, start, &ptr);
766 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
767 err = write_one_cache_group(trans, root,
770 * if we fail to write the cache group, we want
771 * to keep it marked dirty in hopes that a later
778 clear_extent_bits(block_group_cache, start, end,
779 BLOCK_GROUP_DIRTY, GFP_NOFS);
781 btrfs_free_path(path);
/*
 * Apply an allocation (@alloc != 0) or a free to block group accounting
 * for [bytenr, bytenr + num_bytes), walking group by group since the
 * range may span several.  Each touched group is marked
 * BLOCK_GROUP_DIRTY.  A mostly-empty group of the wrong @data type is
 * retyped to match; otherwise a type mismatch demotes the group to
 * MIXED.  Freed bytes are returned to free_space_cache when @mark_free
 * is set.
 */
785 static int update_block_group(struct btrfs_trans_handle *trans,
786 struct btrfs_root *root,
787 u64 bytenr, u64 num_bytes, int alloc,
788 int mark_free, int data)
790 struct btrfs_block_group_cache *cache;
791 struct btrfs_fs_info *info = root->fs_info;
792 u64 total = num_bytes;
799 cache = btrfs_lookup_block_group(info, bytenr);
803 byte_in_group = bytenr - cache->key.objectid;
804 WARN_ON(byte_in_group > cache->key.offset);
805 start = cache->key.objectid;
806 end = start + cache->key.offset - 1;
/* the block group item must be rewritten at commit time */
807 set_extent_bits(&info->block_group_cache, start, end,
808 BLOCK_GROUP_DIRTY, GFP_NOFS);
810 old_val = btrfs_block_group_used(&cache->item);
/* clamp this pass to the bytes that fall inside this group */
811 num_bytes = min(total, cache->key.offset - byte_in_group);
/* wrong type but less than half full: flip the group's type */
813 if (cache->data != data &&
814 old_val < (cache->key.offset >> 1)) {
819 bit_to_clear = BLOCK_GROUP_METADATA;
820 bit_to_set = BLOCK_GROUP_DATA;
822 ~BTRFS_BLOCK_GROUP_MIXED;
824 BTRFS_BLOCK_GROUP_DATA;
826 bit_to_clear = BLOCK_GROUP_DATA;
827 bit_to_set = BLOCK_GROUP_METADATA;
829 ~BTRFS_BLOCK_GROUP_MIXED;
831 ~BTRFS_BLOCK_GROUP_DATA;
833 clear_extent_bits(&info->block_group_cache,
834 start, end, bit_to_clear,
836 set_extent_bits(&info->block_group_cache,
837 start, end, bit_to_set,
/* wrong type and too full to flip: fall back to a MIXED group */
839 } else if (cache->data != data &&
840 cache->data != BTRFS_BLOCK_GROUP_MIXED) {
841 cache->data = BTRFS_BLOCK_GROUP_MIXED;
842 set_extent_bits(&info->block_group_cache,
845 BLOCK_GROUP_METADATA,
/* adjust the used-bytes counter for this group */
848 old_val += num_bytes;
850 old_val -= num_bytes;
/* on free, hand the space back to the allocator's cache */
852 set_extent_dirty(&info->free_space_cache,
853 bytenr, bytenr + num_bytes - 1,
857 btrfs_set_block_group_used(&cache->item, old_val);
/*
 * Pin (@pin != 0) or unpin the range [bytenr, bytenr + num) in
 * fs_info->pinned_extents and mirror the byte counts into each
 * affected block group's ->pinned and fs_info->total_pinned, walking
 * the range group by group.
 */
863 static int update_pinned_extents(struct btrfs_root *root,
864 u64 bytenr, u64 num, int pin)
867 struct btrfs_block_group_cache *cache;
868 struct btrfs_fs_info *fs_info = root->fs_info;
871 set_extent_dirty(&fs_info->pinned_extents,
872 bytenr, bytenr + num - 1, GFP_NOFS);
874 clear_extent_dirty(&fs_info->pinned_extents,
875 bytenr, bytenr + num - 1, GFP_NOFS);
878 cache = btrfs_lookup_block_group(fs_info, bytenr);
/* clamp this pass to the bytes inside the current group */
880 len = min(num, cache->key.offset -
881 (bytenr - cache->key.objectid));
883 cache->pinned += len;
884 fs_info->total_pinned += len;
886 cache->pinned -= len;
887 fs_info->total_pinned -= len;
/*
 * Copy every EXTENT_DIRTY range from fs_info->pinned_extents into
 * @copy, giving the caller an independent snapshot of the currently
 * pinned set.
 */
895 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
900 struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
904 ret = find_first_extent_bit(pinned_extents, last,
905 &start, &end, EXTENT_DIRTY);
908 set_extent_dirty(copy, start, end, GFP_NOFS);
/*
 * After a transaction commit, walk the dirty ranges in @unpin: unpin
 * each one (updating block group accounting), clear it from @unpin,
 * and return the space to the free_space_cache for reuse.
 */
914 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
915 struct btrfs_root *root,
916 struct extent_map_tree *unpin)
921 struct extent_map_tree *free_space_cache;
922 free_space_cache = &root->fs_info->free_space_cache;
925 ret = find_first_extent_bit(unpin, 0, &start, &end,
929 update_pinned_extents(root, start, end + 1 - start, 0);
930 clear_extent_dirty(unpin, start, end, GFP_NOFS);
931 set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
/*
 * Materialize deferred extent allocations: for every range recorded as
 * EXTENT_LOCKED in fs_info->extent_ins (allocations made from the
 * extent tree itself), insert an EXTENT_ITEM with refs == 1 plus a
 * backref owned by the extent root, clearing the bits as it goes.
 */
936 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
937 btrfs_root *extent_root)
941 struct btrfs_fs_info *info = extent_root->fs_info;
942 struct btrfs_path *path;
943 struct btrfs_key ins;
944 struct btrfs_extent_item extent_item;
/* all deferred extents start life with a single reference */
948 btrfs_set_stack_extent_refs(&extent_item, 1);
949 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
950 path = btrfs_alloc_path();
953 ret = find_first_extent_bit(&info->extent_ins, 0, &start,
954 &end, EXTENT_LOCKED);
958 ins.objectid = start;
959 ins.offset = end + 1 - start;
960 err = btrfs_insert_item(trans, extent_root, &ins,
961 &extent_item, sizeof(extent_item));
962 clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
964 err = btrfs_insert_extent_backref(trans, extent_root, path,
965 start, extent_root->root_key.objectid,
969 btrfs_free_path(path);
/*
 * Pin [bytenr, bytenr + num_bytes) so the space cannot be reused until
 * the transaction commits.  A tree block whose header generation
 * matches the running transaction was created in this transaction and
 * can be skipped.  The range is also queued in pending_del as
 * EXTENT_LOCKED — NOTE(review): the condition guarding that queueing
 * (an elided int parameter) is not visible here; confirm.
 */
973 static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
977 struct extent_buffer *buf;
980 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
982 if (btrfs_buffer_uptodate(buf)) {
984 root->fs_info->running_transaction->transid;
/* block born in this transaction: no need to pin it */
985 if (btrfs_header_generation(buf) == transid) {
986 free_extent_buffer(buf);
990 free_extent_buffer(buf);
992 update_pinned_extents(root, bytenr, num_bytes, 1);
994 set_extent_bits(&root->fs_info->pending_del,
995 bytenr, bytenr + num_bytes - 1,
996 EXTENT_LOCKED, GFP_NOFS);
1003 * remove an extent from the root, returns 0 on success
/*
 * Core extent free: drop the matching backref (when ref information is
 * supplied), decrement the EXTENT_ITEM reference count, and once it
 * hits zero optionally pin the range, fix up super/root byte
 * accounting, delete the item, and return the space through
 * update_block_group().
 */
1005 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1006 *root, u64 bytenr, u64 num_bytes,
1007 u64 root_objectid, u64 ref_generation,
1008 u64 owner_objectid, u64 owner_offset, int pin,
1011 struct btrfs_path *path;
1012 struct btrfs_key key;
1013 struct btrfs_fs_info *info = root->fs_info;
1014 struct btrfs_root *extent_root = info->extent_root;
1015 struct extent_buffer *leaf;
1017 struct btrfs_extent_item *ei;
1020 key.objectid = bytenr;
1021 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1022 key.offset = num_bytes;
1024 path = btrfs_alloc_path();
/* NOTE(review): root_objectid == 3 special-case left over from
 * debugging — see the disabled printk below */
1028 if (ref_generation && owner_objectid == 0 && root_objectid == 3) {
1029 //printk("drop backref root %Lu gen %Lu byte %Lu\n", root_objectid, ref_generation, bytenr );
/* remove the backref that recorded this owner's reference */
1031 ret = lookup_extent_backref(trans, extent_root, path,
1032 bytenr, root_objectid,
1034 owner_objectid, owner_offset, 1);
1036 ret = btrfs_del_item(trans, extent_root, path);
1038 btrfs_print_leaf(extent_root, path->nodes[0]);
1040 printk("Unable to find ref byte nr %Lu root %Lu "
1041 " gen %Lu owner %Lu offset %Lu\n", bytenr,
1042 root_objectid, ref_generation, owner_objectid,
1045 btrfs_release_path(extent_root, path);
1046 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
1051 leaf = path->nodes[0];
1052 ei = btrfs_item_ptr(leaf, path->slots[0],
1053 struct btrfs_extent_item);
/* drop one reference from the extent item */
1054 refs = btrfs_extent_refs(leaf, ei);
1057 btrfs_set_extent_refs(leaf, ei, refs);
1058 btrfs_mark_buffer_dirty(leaf);
/* last reference gone: reclaim the extent */
1065 ret = pin_down_bytes(root, bytenr, num_bytes, 0);
1071 /* block accounting for super block */
1072 super_used = btrfs_super_bytes_used(&info->super_copy);
1073 btrfs_set_super_bytes_used(&info->super_copy,
1074 super_used - num_bytes);
1076 /* block accounting for root item */
1077 root_used = btrfs_root_used(&root->root_item);
1078 btrfs_set_root_used(&root->root_item,
1079 root_used - num_bytes);
1081 ret = btrfs_del_item(trans, extent_root, path);
1085 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1089 btrfs_free_path(path);
1090 finish_current_insert(trans, extent_root);
1095 * find all the blocks marked as pending in the radix tree and remove
1096 * them from the extent map
/*
 * Process the deferred deletions queued by pin_down_bytes(): each
 * EXTENT_LOCKED range in pending_del is pinned, cleared from the
 * queue, and freed through __free_extent() under the extent root's
 * ownership.
 */
1098 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1099 btrfs_root *extent_root)
1105 struct extent_map_tree *pending_del;
1106 struct extent_map_tree *pinned_extents;
1108 pending_del = &extent_root->fs_info->pending_del;
1109 pinned_extents = &extent_root->fs_info->pinned_extents;
1112 ret = find_first_extent_bit(pending_del, 0, &start, &end,
1116 update_pinned_extents(extent_root, start, end + 1 - start, 1);
1117 clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
1119 ret = __free_extent(trans, extent_root,
1120 start, end + 1 - start,
1121 extent_root->root_key.objectid,
1130 * remove an extent from the root, returns 0 on success
/*
 * Public entry point for freeing an extent.  Frees from the extent
 * tree itself are only pinned and queued (pin_down_bytes) to avoid
 * recursing into the tree being modified; everything else goes through
 * __free_extent() followed by a flush of pending deletions.
 */
1132 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1133 *root, u64 bytenr, u64 num_bytes,
1134 u64 root_objectid, u64 ref_generation,
1135 u64 owner_objectid, u64 owner_offset, int pin)
1137 struct btrfs_root *extent_root = root->fs_info->extent_root;
1141 WARN_ON(num_bytes < root->sectorsize);
1142 if (!root->ref_cows)
/* defer frees of extent-tree blocks to del_pending_extents() */
1145 if (root == extent_root) {
1146 pin_down_bytes(root, bytenr, num_bytes, 1);
1149 ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
1150 ref_generation, owner_objectid, owner_offset,
1152 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
1153 return ret ? ret : pending_ret;
/*
 * Round @val up to the next multiple of root->stripesize.  The mask
 * arithmetic assumes stripesize is a power of two.
 */
1156 static u64 stripe_align(struct btrfs_root *root, u64 val)
1158 u64 mask = ((u64)root->stripesize - 1);
1159 u64 ret = (val + mask) & ~mask;
1164 * walks the btree of allocated extents and find a hole of a given size.
1165 * The key ins is changed to record the hole:
1166 * ins->objectid == block start
1167 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1168 * ins->offset == number of blocks
1169 * Any available blocks before search_start are skipped.
/*
 * NOTE(review): this excerpt elides many lines of the original;
 * comments below describe only the visible statements.
 */
1171 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1172 *orig_root, u64 num_bytes, u64 empty_size,
1173 u64 search_start, u64 search_end, u64 hint_byte,
1174 struct btrfs_key *ins, u64 exclude_start,
1175 u64 exclude_nr, int data)
1177 struct btrfs_path *path;
1178 struct btrfs_key key;
1184 u64 orig_search_start = search_start;
1186 struct extent_buffer *l;
1187 struct btrfs_root * root = orig_root->fs_info->extent_root;
1188 struct btrfs_fs_info *info = root->fs_info;
1189 u64 total_needed = num_bytes;
1191 struct btrfs_block_group_cache *block_group;
1196 WARN_ON(num_bytes < root->sectorsize);
1197 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1199 level = btrfs_header_level(root->node);
/* very large hinted allocations may use any group type */
1201 if (num_bytes >= 32 * 1024 * 1024 && hint_byte) {
1202 data = BTRFS_BLOCK_GROUP_MIXED;
1205 if (search_end == (u64)-1)
1206 search_end = btrfs_super_total_bytes(&info->super_copy);
/* pick a starting block group, preferring the hinted byte */
1208 block_group = btrfs_lookup_block_group(info, hint_byte);
1210 hint_byte = search_start;
1211 block_group = btrfs_find_block_group(root, block_group,
1212 hint_byte, data, 1);
1214 block_group = btrfs_find_block_group(root,
1216 search_start, data, 1);
1219 total_needed += empty_size;
1220 path = btrfs_alloc_path();
/* consult the free space cache, then align to the stripe size */
1222 search_start = find_search_start(root, &block_group, search_start,
1223 total_needed, data, full_scan);
1224 search_start = stripe_align(root, search_start);
1225 cached_start = search_start;
1226 btrfs_init_path(path);
1227 ins->objectid = search_start;
1232 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1236 if (path->slots[0] > 0) {
1241 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1244 * walk backwards to find the first extent item key
1246 while(btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
1247 if (path->slots[0] == 0) {
1248 ret = btrfs_prev_leaf(root, path);
1250 ret = btrfs_search_slot(trans, root, ins,
1254 if (path->slots[0] > 0)
1262 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1266 slot = path->slots[0];
/* exhausted this leaf: move on to the next one */
1267 if (slot >= btrfs_header_nritems(l)) {
1268 ret = btrfs_next_leaf(root, path);
1274 search_start = max(search_start,
1275 block_group->key.objectid);
1277 aligned = stripe_align(root, search_start);
1278 ins->objectid = aligned;
1279 if (aligned >= search_end) {
1283 ins->offset = search_end - aligned;
/* no more items: everything up to search_end is a candidate */
1287 ins->objectid = stripe_align(root,
1288 last_byte > search_start ?
1289 last_byte : search_start);
1290 if (search_end <= ins->objectid) {
1294 ins->offset = search_end - ins->objectid;
1295 BUG_ON(ins->objectid >= search_end);
1298 btrfs_item_key_to_cpu(l, &key, slot);
/* a gap before this key that is big enough: take it */
1300 if (key.objectid >= search_start && key.objectid > last_byte &&
1302 if (last_byte < search_start)
1303 last_byte = search_start;
1304 aligned = stripe_align(root, last_byte);
1305 hole_size = key.objectid - aligned;
1306 if (key.objectid > aligned && hole_size >= num_bytes) {
1307 ins->objectid = aligned;
1308 ins->offset = hole_size;
1312 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
1313 if (!start_found && btrfs_key_type(&key) ==
1314 BTRFS_BLOCK_GROUP_ITEM_KEY) {
1315 last_byte = key.objectid;
1323 last_byte = key.objectid + key.offset;
/* crossed the block group boundary: restart in the next group */
1325 if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
1326 last_byte >= block_group->key.objectid +
1327 block_group->key.offset) {
1328 btrfs_release_path(root, path);
1329 search_start = block_group->key.objectid +
1330 block_group->key.offset;
1338 /* we have to make sure we didn't find an extent that has already
1339 * been allocated by the map tree or the original allocation
1341 btrfs_release_path(root, path);
1342 BUG_ON(ins->objectid < search_start);
1344 if (ins->objectid + num_bytes >= search_end)
/* candidate spills out of the chosen block group: keep looking */
1346 if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
1347 ins->objectid + num_bytes > block_group->
1348 key.objectid + block_group->key.offset) {
1349 search_start = block_group->key.objectid +
1350 block_group->key.offset;
/* reject ranges with a pending (not yet inserted) allocation */
1353 if (test_range_bit(&info->extent_ins, ins->objectid,
1354 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
1355 search_start = ins->objectid + num_bytes;
/* reject ranges still pinned by the running transaction */
1358 if (test_range_bit(&info->pinned_extents, ins->objectid,
1359 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
1360 search_start = ins->objectid + num_bytes;
/* honor the caller's exclusion window */
1363 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
1364 ins->objectid < exclude_start + exclude_nr)) {
1365 search_start = exclude_start + exclude_nr;
1369 block_group = btrfs_lookup_block_group(info, ins->objectid);
1371 trans->block_group = block_group;
1373 ins->offset = num_bytes;
1374 btrfs_free_path(path);
/* wrap around and retry, relaxing constraints as we go */
1378 if (search_start + num_bytes >= search_end) {
1380 search_start = orig_search_start;
1387 total_needed -= empty_size;
1389 data = BTRFS_BLOCK_GROUP_MIXED;
1393 block_group = btrfs_lookup_block_group(info, search_start);
1395 block_group = btrfs_find_block_group(root, block_group,
1396 search_start, data, 0);
1400 btrfs_release_path(root, path);
1401 btrfs_free_path(path);
1405 * finds a free extent and does all the dirty work required for allocation
1406 * returns the key for the extent through ins, and a tree buffer for
1407 * the first block of the extent through buf.
1409 * returns 0 if everything worked, non-zero otherwise.
/*
 * On success: super and root byte accounting are bumped, the range is
 * removed from free_space_cache, the EXTENT_ITEM (refs == 1) and its
 * backref are inserted (deferred via extent_ins when allocating for
 * the extent root itself), and block group accounting is updated.
 */
1411 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1412 struct btrfs_root *root,
1413 u64 num_bytes, u64 root_objectid, u64 ref_generation,
1414 u64 owner, u64 owner_offset,
1415 u64 empty_size, u64 hint_byte,
1416 u64 search_end, struct btrfs_key *ins, int data)
1420 u64 super_used, root_used;
1421 u64 search_start = 0;
1422 struct btrfs_fs_info *info = root->fs_info;
1423 struct btrfs_root *extent_root = info->extent_root;
1424 struct btrfs_extent_item extent_item;
1425 struct btrfs_path *path;
1427 btrfs_set_stack_extent_refs(&extent_item, 1);
1429 WARN_ON(num_bytes < root->sectorsize);
1430 ret = find_free_extent(trans, root, num_bytes, empty_size,
1431 search_start, search_end, hint_byte, ins,
1432 trans->alloc_exclude_start,
1433 trans->alloc_exclude_nr, data);
1438 /* block accounting for super block */
1439 super_used = btrfs_super_bytes_used(&info->super_copy);
1440 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
1442 /* block accounting for root item */
1443 root_used = btrfs_root_used(&root->root_item);
1444 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
/* the chosen range is no longer free */
1446 clear_extent_dirty(&root->fs_info->free_space_cache,
1447 ins->objectid, ins->objectid + ins->offset - 1,
/* allocating for the extent tree itself: defer the item insert to
 * finish_current_insert() to avoid recursion */
1450 if (root == extent_root) {
1451 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
1452 ins->objectid + ins->offset - 1,
1453 EXTENT_LOCKED, GFP_NOFS);
/* exclude this range from nested allocations while inserting */
1458 WARN_ON(trans->alloc_exclude_nr);
1459 trans->alloc_exclude_start = ins->objectid;
1460 trans->alloc_exclude_nr = ins->offset;
1461 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
1462 sizeof(extent_item));
1464 trans->alloc_exclude_start = 0;
1465 trans->alloc_exclude_nr = 0;
1468 path = btrfs_alloc_path();
1470 ret = btrfs_insert_extent_backref(trans, extent_root, path,
1471 ins->objectid, root_objectid,
1472 ref_generation, owner, owner_offset);
1475 btrfs_free_path(path);
1476 finish_current_insert(trans, extent_root);
1477 pending_ret = del_pending_extents(trans, extent_root);
1487 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
1494 * helper function to allocate a block for a given tree
1495 * returns the tree buffer or NULL.
/*
 * Thin wrapper: stamps the ref generation with the current transaction
 * id and delegates to __btrfs_alloc_free_block() with zero owner info.
 */
1497 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1498 struct btrfs_root *root,
1500 u64 root_objectid, u64 hint,
1506 ref_generation = trans->transid;
1511 return __btrfs_alloc_free_block(trans, root, blocksize, root_objectid,
1512 ref_generation, 0, 0, hint, empty_size);
1516 * helper function to allocate a block for a given tree
1517 * returns the tree buffer or NULL.
/*
 * Allocate one tree block: reserve the extent via btrfs_alloc_extent(),
 * create its extent_buffer, mark it uptodate/dirty/csum'd and flag it
 * for defrag.  If the buffer cannot be created the extent is freed
 * again and ERR_PTR(-ENOMEM) is returned; allocation failures return
 * ERR_PTR(ret).
 */
1519 struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1520 struct btrfs_root *root,
1529 struct btrfs_key ins;
1531 struct extent_buffer *buf;
1533 ret = btrfs_alloc_extent(trans, root, blocksize,
1534 root_objectid, ref_generation,
1535 first_objectid, level, empty_size, hint,
1539 return ERR_PTR(ret);
1541 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
/* no buffer: give the reserved extent back */
1543 btrfs_free_extent(trans, root, ins.objectid, blocksize,
1544 root->root_key.objectid, ref_generation,
1546 return ERR_PTR(-ENOMEM);
1548 btrfs_set_buffer_uptodate(buf);
/* new block must be written out with this transaction */
1549 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
1550 buf->start + buf->len - 1, GFP_NOFS);
1551 set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
1552 buf->start, buf->start + buf->len - 1,
1553 EXTENT_CSUM, GFP_NOFS);
1554 buf->flags |= EXTENT_CSUM;
1555 btrfs_set_buffer_defrag(buf);
1556 trans->blocks_used++;
/*
 * Drop one reference on every data extent pointed to by the items of
 * 'leaf'.  Used while tearing down a snapshot: once the leaf itself is
 * going away, each file extent it references loses a ref.
 */
1560 static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1561 struct btrfs_root *root, struct extent_buffer *leaf)
1564 u64 leaf_generation;
1565 struct btrfs_key key;
1566 struct btrfs_file_extent_item *fi;
1571 BUG_ON(!btrfs_is_leaf(leaf));
1572 nritems = btrfs_header_nritems(leaf);
/* owner/generation of the leaf identify who held the refs being dropped */
1573 leaf_owner = btrfs_header_owner(leaf);
1574 leaf_generation = btrfs_header_generation(leaf);
1576 for (i = 0; i < nritems; i++) {
1579 btrfs_item_key_to_cpu(leaf, &key, i);
/* only EXTENT_DATA items reference on-disk extents */
1580 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1582 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
/* inline extents live inside the leaf itself; nothing external to free */
1583 if (btrfs_file_extent_type(leaf, fi) ==
1584 BTRFS_FILE_EXTENT_INLINE)
1587 * FIXME make sure to insert a trans record that
1588 * repeats the snapshot del on crash
1590 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
/* bytenr 0 marks a hole — no extent to drop a ref on */
1591 if (disk_bytenr == 0)
1593 ret = btrfs_free_extent(trans, root, disk_bytenr,
1594 btrfs_file_extent_disk_num_bytes(leaf, fi),
1595 leaf_owner, leaf_generation,
1596 key.objectid, key.offset, 0);
/*
 * Issue readahead for the children of 'node' before walk_down_tree
 * descends into them.  Best effort only: return values are not
 * propagated to the caller.
 */
1602 static void reada_walk_down(struct btrfs_root *root,
1603 struct extent_buffer *node)
1613 nritems = btrfs_header_nritems(node);
1614 level = btrfs_header_level(node);
1615 for (i = 0; i < nritems; i++) {
1616 bytenr = btrfs_node_blockptr(node, i);
1617 blocksize = btrfs_level_size(root, level - 1);
/*
 * Check the child's ref count first.  NOTE(review): the elided lines
 * presumably skip readahead when refs != 1 (a shared block will not be
 * descended into) — confirm against the full source.
 */
1618 ret = lookup_extent_ref(NULL, root, bytenr, blocksize, &refs);
/* drop fs_mutex around the blocking read; readahead may do real I/O */
1622 mutex_unlock(&root->fs_info->fs_mutex);
1623 ret = readahead_tree_block(root, bytenr, blocksize);
1625 mutex_lock(&root->fs_info->fs_mutex);
1632 * helper function for drop_snapshot, this walks down the tree dropping ref
1633 * counts as it goes.
1635 static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1636 *root, struct btrfs_path *path, int *level)
1641 struct extent_buffer *next;
1642 struct extent_buffer *cur;
1643 struct extent_buffer *parent;
1648 WARN_ON(*level < 0);
1649 WARN_ON(*level >= BTRFS_MAX_LEVEL);
/* refs on the current node decide whether we may descend into it */
1650 ret = lookup_extent_ref(trans, root,
1651 path->nodes[*level]->start,
1652 path->nodes[*level]->len, &refs);
1658 * walk down to the last node level and free all the leaves
1660 while(*level >= 0) {
1661 WARN_ON(*level < 0);
1662 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1663 cur = path->nodes[*level];
/* entering a node for the first time (slot 0): prefetch its children */
1665 if (*level > 0 && path->slots[*level] == 0)
1666 reada_walk_down(root, cur);
/* sanity: the cached level must match the block header's level */
1668 if (btrfs_header_level(cur) != *level)
/* finished all slots in this node — pop back up (handled below/elided) */
1671 if (path->slots[*level] >=
1672 btrfs_header_nritems(cur))
/* at a leaf: drop refs on every data extent it points to */
1675 ret = drop_leaf_ref(trans, root, cur);
1679 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1680 blocksize = btrfs_level_size(root, *level - 1);
1681 ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs);
/*
 * Shared child (refs > 1, per the elided branch): just drop our ref,
 * charged to this parent's owner/generation, and move to the next slot
 * without descending.
 */
1684 parent = path->nodes[*level];
1685 root_owner = btrfs_header_owner(parent);
1686 root_gen = btrfs_header_generation(parent);
1687 path->slots[*level]++;
1688 ret = btrfs_free_extent(trans, root, bytenr,
1689 blocksize, root_owner,
/* child is exclusively ours: bring it into memory so we can descend */
1694 next = btrfs_find_tree_block(root, bytenr, blocksize);
1695 if (!next || !btrfs_buffer_uptodate(next)) {
1696 free_extent_buffer(next);
/* fs_mutex must not be held across the blocking tree-block read */
1697 mutex_unlock(&root->fs_info->fs_mutex);
1698 next = read_tree_block(root, bytenr, blocksize);
1699 mutex_lock(&root->fs_info->fs_mutex);
1701 /* we dropped the lock, check one more time */
1702 ret = lookup_extent_ref(trans, root, bytenr,
/*
 * Ref count changed while unlocked (elided check): treat the child as
 * shared after all — drop our ref and skip it.
 */
1706 parent = path->nodes[*level];
1707 root_owner = btrfs_header_owner(parent);
1708 root_gen = btrfs_header_generation(parent);
1710 path->slots[*level]++;
1711 free_extent_buffer(next);
1712 ret = btrfs_free_extent(trans, root, bytenr,
/* descend: make the child the new path node one level down */
1720 WARN_ON(*level <= 0);
1721 if (path->nodes[*level-1])
1722 free_extent_buffer(path->nodes[*level-1]);
1723 path->nodes[*level-1] = next;
1724 *level = btrfs_header_level(next);
1725 path->slots[*level] = 0;
1728 WARN_ON(*level < 0);
1729 WARN_ON(*level >= BTRFS_MAX_LEVEL);
/*
 * Done with the node at *level: free its extent, charging the ref to
 * the tree root itself when it is the root node, otherwise to the
 * owner/generation of its parent in the path.
 */
1731 if (path->nodes[*level] == root->node) {
1732 root_owner = root->root_key.objectid;
1733 parent = path->nodes[*level];
1735 parent = path->nodes[*level + 1];
1736 root_owner = btrfs_header_owner(parent);
1739 root_gen = btrfs_header_generation(parent);
1740 ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
1741 path->nodes[*level]->len,
1742 root_owner, root_gen, 0, 0, 1);
1743 free_extent_buffer(path->nodes[*level]);
1744 path->nodes[*level] = NULL;
1751 * helper for dropping snapshots. This walks back up the tree in the path
1752 * to find the first node higher up where we haven't yet gone through
1755 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1756 *root, struct btrfs_path *path, int *level)
1760 struct btrfs_root_item *root_item = &root->root_item;
/* climb from the current level toward the root, one node at a time */
1765 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1766 slot = path->slots[i];
/*
 * This node still has unvisited slots: record how far the drop has
 * progressed in the root item so it can be resumed after a crash or
 * transaction break, then stop climbing.
 */
1767 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1768 struct extent_buffer *node;
1769 struct btrfs_disk_key disk_key;
1770 node = path->nodes[i];
1773 WARN_ON(*level == 0);
1774 btrfs_node_key(node, &disk_key, path->slots[i]);
1775 memcpy(&root_item->drop_progress,
1776 &disk_key, sizeof(disk_key));
1777 root_item->drop_level = i;
/*
 * Node fully processed: free its extent.  Charge the ref to the tree
 * root when this is the root node, otherwise to the parent's
 * owner/generation.
 */
1780 if (path->nodes[*level] == root->node) {
1781 root_owner = root->root_key.objectid;
1783 btrfs_header_generation(path->nodes[*level]);
1785 struct extent_buffer *node;
1786 node = path->nodes[*level + 1];
1787 root_owner = btrfs_header_owner(node);
1788 root_gen = btrfs_header_generation(node);
1790 ret = btrfs_free_extent(trans, root,
1791 path->nodes[*level]->start,
1792 path->nodes[*level]->len,
1793 root_owner, root_gen, 0, 0, 1);
1795 free_extent_buffer(path->nodes[*level]);
1796 path->nodes[*level] = NULL;
1804 * drop the reference count on the tree rooted at 'snap'. This traverses
1805 * the tree freeing any blocks that have a ref count of zero after being
1808 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1814 struct btrfs_path *path;
1817 struct btrfs_root_item *root_item = &root->root_item;
1819 path = btrfs_alloc_path();
1822 level = btrfs_header_level(root->node);
/*
 * drop_progress objectid 0 means this is a fresh drop: start the walk
 * at the tree root.
 */
1824 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1825 path->nodes[level] = root->node;
1826 extent_buffer_get(root->node);
1827 path->slots[level] = 0;
1829 struct btrfs_key key;
1830 struct btrfs_disk_key found_key;
1831 struct extent_buffer *node;
/*
 * Resuming an interrupted drop: re-seek to the saved key at the saved
 * level so the walk continues exactly where it left off.
 */
1833 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1834 level = root_item->drop_level;
1835 path->lowest_level = level;
1836 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
/* sanity: the key found must match the recorded progress key */
1841 node = path->nodes[level];
1842 btrfs_node_key(node, &found_key, path->slots[level]);
1843 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
1844 sizeof(found_key)));
/* alternate down/up passes until the whole tree has been dropped */
1847 wret = walk_down_tree(trans, root, path, &level);
1853 wret = walk_up_tree(trans, root, path, &level);
/* release any extent buffers still referenced by the path */
1861 for (i = 0; i <= orig_level; i++) {
1862 if (path->nodes[i]) {
1863 free_extent_buffer(path->nodes[i]);
1864 path->nodes[i] = NULL;
1868 btrfs_free_path(path);
/*
 * Tear down the in-memory block group cache at unmount time: free every
 * cached btrfs_block_group_cache struct stashed as state-private data,
 * clear all bits in the block_group_cache tree, then clear the dirty
 * ranges in the free_space_cache tree.  The elided surrounding loops
 * iterate until find_first_extent_bit finds nothing more.
 */
1872 int btrfs_free_block_groups(struct btrfs_fs_info *info)
/* (unsigned int)-1 matches any bit combination */
1879 ret = find_first_extent_bit(&info->block_group_cache, 0,
1880 &start, &end, (unsigned int)-1);
/* the private value is the pointer to the kmalloc'd cache struct */
1883 ret = get_state_private(&info->block_group_cache, start, &ptr);
1885 kfree((void *)(unsigned long)ptr);
1886 clear_extent_bits(&info->block_group_cache, start,
1887 end, (unsigned int)-1, GFP_NOFS);
/* second pass: drop the dirty free-space ranges */
1890 ret = find_first_extent_bit(&info->free_space_cache, 0,
1891 &start, &end, EXTENT_DIRTY);
1894 clear_extent_dirty(&info->free_space_cache, start,
1900 int btrfs_read_block_groups(struct btrfs_root *root)
1902 struct btrfs_path *path;
1906 struct btrfs_block_group_cache *cache;
1907 struct btrfs_fs_info *info = root->fs_info;
1908 struct extent_map_tree *block_group_cache;
1909 struct btrfs_key key;
1910 struct btrfs_key found_key;
1911 struct extent_buffer *leaf;
1913 block_group_cache = &info->block_group_cache;
1915 root = info->extent_root;
1917 key.offset = BTRFS_BLOCK_GROUP_SIZE;
1918 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1920 path = btrfs_alloc_path();
1925 ret = btrfs_search_slot(NULL, info->extent_root,
1931 leaf = path->nodes[0];
1932 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1933 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1939 read_extent_buffer(leaf, &cache->item,
1940 btrfs_item_ptr_offset(leaf, path->slots[0]),
1941 sizeof(cache->item));
1942 memcpy(&cache->key, &found_key, sizeof(found_key));
1945 key.objectid = found_key.objectid + found_key.offset;
1946 btrfs_release_path(root, path);
1948 if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
1949 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
1950 cache->data = BTRFS_BLOCK_GROUP_MIXED;
1951 } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
1952 bit = BLOCK_GROUP_DATA;
1953 cache->data = BTRFS_BLOCK_GROUP_DATA;
1955 bit = BLOCK_GROUP_METADATA;
1959 /* use EXTENT_LOCKED to prevent merging */
1960 set_extent_bits(block_group_cache, found_key.objectid,
1961 found_key.objectid + found_key.offset - 1,
1962 bit | EXTENT_LOCKED, GFP_NOFS);
1963 set_state_private(block_group_cache, found_key.objectid,
1964 (unsigned long)cache);
1967 btrfs_super_total_bytes(&info->super_copy))
1971 btrfs_free_path(path);