Btrfs: Go back to kmaps instead of page_address in extent_buffers
[linux-2.6-block.git] / fs / btrfs / extent-tree.c
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "disk-io.h"
22#include "print-tree.h"
23#include "transaction.h"
24
25static int finish_current_insert(struct btrfs_trans_handle *trans, struct
26 btrfs_root *extent_root);
27static int del_pending_extents(struct btrfs_trans_handle *trans, struct
28 btrfs_root *extent_root);
29
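/*
 * walk the extent tree for a metadata block group and set a bit in the
 * fs_info extent_map_radix for every block that is not covered by an
 * extent item, recording the group's free space.  data block groups
 * and groups that are already cached are skipped.
 */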
30static int cache_block_group(struct btrfs_root *root,
31 struct btrfs_block_group_cache *block_group)
32{
33 struct btrfs_path *path;
34 int ret;
35 struct btrfs_key key;
36 struct extent_buffer *leaf;
37 struct radix_tree_root *extent_radix;
38 int slot;
39 u64 i;
40 u64 last = 0;
41 u64 hole_size;
42 u64 first_free;
43 int found = 0;
44
45 root = root->fs_info->extent_root;
46 extent_radix = &root->fs_info->extent_map_radix;
47
48 if (block_group->cached)
49 return 0;
50 if (block_group->data)
51 return 0;
52 path = btrfs_alloc_path();
53 if (!path)
54 return -ENOMEM;
55
56 path->reada = 2;
57 first_free = block_group->key.objectid;
58 key.objectid = block_group->key.objectid;
59 key.offset = 0;
60
61 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
62 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
63
64 if (ret < 0)
65 return ret;
66
67 if (ret && path->slots[0] > 0)
68 path->slots[0]--;
69
70 while(1) {
71 leaf = path->nodes[0];
72 slot = path->slots[0];
73 if (slot >= btrfs_header_nritems(leaf)) {
74 ret = btrfs_next_leaf(root, path);
75 if (ret < 0)
76 goto err;
77 if (ret == 0) {
78 continue;
79 } else {
80 break;
81 }
82 }
83
84 btrfs_item_key_to_cpu(leaf, &key, slot);
85 if (key.objectid < block_group->key.objectid) {
86 if (key.objectid + key.offset > first_free)
87 first_free = key.objectid + key.offset;
88 goto next;
89 }
90
91 if (key.objectid >= block_group->key.objectid +
92 block_group->key.offset) {
93 break;
94 }
95
96 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
97 if (!found) {
98 last = first_free;
99 found = 1;
100 }
101 hole_size = key.objectid - last;
102 for (i = 0; i < hole_size; i++) {
103 set_radix_bit(extent_radix, last + i);
104 }
105 last = key.objectid + key.offset;
106 }
107next:
108 path->slots[0]++;
109 }
110
111 if (!found)
112 last = first_free;
113 if (block_group->key.objectid +
114 block_group->key.offset > last) {
115 hole_size = block_group->key.objectid +
116 block_group->key.offset - last;
117 for (i = 0; i < hole_size; i++) {
118 set_radix_bit(extent_radix, last + i);
119 }
120 }
121 block_group->cached = 1;
122err:
123 btrfs_free_path(path);
124 return 0;
125}
126
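/*
 * return the block group cache that contains blocknr, checking the
 * metadata radix first and then the data radix.  returns NULL if no
 * block group covers that block.
 */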
127struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
128 btrfs_fs_info *info,
129 u64 blocknr)
130{
131 struct btrfs_block_group_cache *block_group;
132 int ret;
133
134 ret = radix_tree_gang_lookup(&info->block_group_radix,
135 (void **)&block_group,
136 blocknr, 1);
137 if (ret) {
138 if (block_group->key.objectid <= blocknr && blocknr <=
139 block_group->key.objectid + block_group->key.offset)
140 return block_group;
141 }
142 ret = radix_tree_gang_lookup(&info->block_group_data_radix,
143 (void **)&block_group,
144 blocknr, 1);
145 if (ret) {
146 if (block_group->key.objectid <= blocknr && blocknr <=
147 block_group->key.objectid + block_group->key.offset)
148 return block_group;
149 }
150 return NULL;
151}
152
153static u64 leaf_range(struct btrfs_root *root)
154{
155 u64 size = BTRFS_LEAF_DATA_SIZE(root);
156 do_div(size, sizeof(struct btrfs_extent_item) +
157 sizeof(struct btrfs_item));
158 return size;
159}
160
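/*
 * for metadata allocations, scan the free block radix starting at
 * search_start for a candidate block to hand back to find_free_extent.
 * multi block allocations keep scanning until the free blocks found
 * are reasonably close together, and when the current block group runs
 * dry we hop to the next group and try again.  data block groups just
 * fall back to the group's last_alloc hint.
 */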
161static u64 find_search_start(struct btrfs_root *root,
162 struct btrfs_block_group_cache **cache_ret,
163 u64 search_start, int num)
164{
165 unsigned long gang[8];
166 int ret;
167 struct btrfs_block_group_cache *cache = *cache_ret;
168 u64 last = max(search_start, cache->key.objectid);
169
170 if (cache->data)
171 goto out;
172again:
173 ret = cache_block_group(root, cache);
174 if (ret)
175 goto out;
176 while(1) {
177 ret = find_first_radix_bit(&root->fs_info->extent_map_radix,
178 gang, last, ARRAY_SIZE(gang));
179 if (!ret)
180 goto out;
181 last = gang[ret-1] + 1;
182 if (num > 1) {
183 if (ret != ARRAY_SIZE(gang)) {
184 goto new_group;
185 }
186 if (gang[ret-1] - gang[0] > leaf_range(root)) {
187 continue;
188 }
189 }
190 if (gang[0] >= cache->key.objectid + cache->key.offset) {
191 goto new_group;
192 }
193 return gang[0];
194 }
195out:
196 return max(cache->last_alloc, search_start);
197
198new_group:
199 cache = btrfs_lookup_block_group(root->fs_info,
200 last + cache->key.offset - 1);
201 if (!cache) {
202 return max((*cache_ret)->last_alloc, search_start);
203 }
204 cache = btrfs_find_block_group(root, cache,
205 last + cache->key.offset - 1, 0, 0);
206 *cache_ret = cache;
207 goto again;
208}
209
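/* scale num by factor tenths, used to decide how full a block group is */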
210static u64 div_factor(u64 num, int factor)
211{
212 num *= factor;
213 do_div(num, 10);
214 return num;
215}
216
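/*
 * pick a block group to allocate from.  the group containing
 * search_start and then the hint group are preferred as long as they
 * match the requested data type and are not too full.  otherwise scan
 * the groups tagged BTRFS_BLOCK_GROUP_AVAIL, then every group, and
 * finally the radix for the other data type before settling for any
 * group at all.
 */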
217struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
218 struct btrfs_block_group_cache
219 *hint, u64 search_start,
220 int data, int owner)
221{
222 struct btrfs_block_group_cache *cache[8];
223 struct btrfs_block_group_cache *found_group = NULL;
224 struct btrfs_fs_info *info = root->fs_info;
225 struct radix_tree_root *radix;
226 struct radix_tree_root *swap_radix;
227 u64 used;
228 u64 last = 0;
229 u64 hint_last;
230 int i;
231 int ret;
232 int full_search = 0;
233 int factor = 8;
234 int data_swap = 0;
235
236 if (!owner)
237 factor = 5;
238
239 if (data) {
240 radix = &info->block_group_data_radix;
241 swap_radix = &info->block_group_radix;
242 } else {
243 radix = &info->block_group_radix;
244 swap_radix = &info->block_group_data_radix;
245 }
246
247 if (search_start) {
248 struct btrfs_block_group_cache *shint;
249 shint = btrfs_lookup_block_group(info, search_start);
250 if (shint && shint->data == data) {
251 used = btrfs_block_group_used(&shint->item);
252 if (used + shint->pinned <
253 div_factor(shint->key.offset, factor)) {
254 return shint;
255 }
256 }
257 }
258 if (hint && hint->data == data) {
259 used = btrfs_block_group_used(&hint->item);
260 if (used + hint->pinned <
261 div_factor(hint->key.offset, factor)) {
262 return hint;
263 }
264 if (used >= div_factor(hint->key.offset, 8)) {
265 radix_tree_tag_clear(radix,
266 hint->key.objectid +
267 hint->key.offset - 1,
268 BTRFS_BLOCK_GROUP_AVAIL);
269 }
270 last = hint->key.offset * 3;
271 if (hint->key.objectid >= last)
272 last = max(search_start + hint->key.offset - 1,
273 hint->key.objectid - last);
274 else
275 last = hint->key.objectid + hint->key.offset;
276 hint_last = last;
277 } else {
278 if (hint)
279 hint_last = max(hint->key.objectid, search_start);
280 else
281 hint_last = search_start;
282
283 last = hint_last;
284 }
285 while(1) {
286 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
287 last, ARRAY_SIZE(cache),
288 BTRFS_BLOCK_GROUP_AVAIL);
289 if (!ret)
290 break;
291 for (i = 0; i < ret; i++) {
292 last = cache[i]->key.objectid +
293 cache[i]->key.offset;
294 used = btrfs_block_group_used(&cache[i]->item);
295 if (used + cache[i]->pinned <
296 div_factor(cache[i]->key.offset, factor)) {
297 found_group = cache[i];
298 goto found;
299 }
300 if (used >= div_factor(cache[i]->key.offset, 8)) {
301 radix_tree_tag_clear(radix,
302 cache[i]->key.objectid +
303 cache[i]->key.offset - 1,
304 BTRFS_BLOCK_GROUP_AVAIL);
305 }
306 }
307 cond_resched();
308 }
309 last = hint_last;
310again:
311 while(1) {
312 ret = radix_tree_gang_lookup(radix, (void **)cache,
313 last, ARRAY_SIZE(cache));
314 if (!ret)
315 break;
316 for (i = 0; i < ret; i++) {
317 last = cache[i]->key.objectid +
318 cache[i]->key.offset;
319 used = btrfs_block_group_used(&cache[i]->item);
320 if (used + cache[i]->pinned < cache[i]->key.offset) {
321 found_group = cache[i];
322 goto found;
323 }
324 if (used >= cache[i]->key.offset) {
325 radix_tree_tag_clear(radix,
326 cache[i]->key.objectid +
327 cache[i]->key.offset - 1,
328 BTRFS_BLOCK_GROUP_AVAIL);
329 }
330 }
331 cond_resched();
332 }
333 if (!full_search) {
334 last = search_start;
335 full_search = 1;
336 goto again;
337 }
338 if (!data_swap) {
339 struct radix_tree_root *tmp = radix;
340 data_swap = 1;
341 radix = swap_radix;
342 swap_radix = tmp;
343 last = search_start;
344 goto again;
345 }
346 if (!found_group) {
347 ret = radix_tree_gang_lookup(radix,
348 (void **)&found_group, 0, 1);
349 if (ret == 0) {
350 ret = radix_tree_gang_lookup(swap_radix,
351 (void **)&found_group,
352 0, 1);
353 }
354 BUG_ON(ret != 1);
355 }
356found:
357 return found_group;
358}
359
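/*
 * find the extent item for [blocknr, blocknr + num_blocks) in the
 * extent tree and bump its reference count by one, then process any
 * extent tree inserts and deletes that were deferred along the way.
 */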
360int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
361 struct btrfs_root *root,
362 u64 blocknr, u64 num_blocks)
363{
364 struct btrfs_path *path;
365 int ret;
366 struct btrfs_key key;
367 struct extent_buffer *l;
368 struct btrfs_extent_item *item;
369 u32 refs;
370
371 path = btrfs_alloc_path();
372 if (!path)
373 return -ENOMEM;
374
375 key.objectid = blocknr;
376 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
377 key.offset = num_blocks;
378 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
379 0, 1);
380 if (ret < 0)
381 return ret;
382 BUG_ON(ret != 0);
386 l = path->nodes[0];
387 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
388 refs = btrfs_extent_refs(l, item);
389 btrfs_set_extent_refs(l, item, refs + 1);
390 btrfs_mark_buffer_dirty(path->nodes[0]);
391
392 btrfs_release_path(root->fs_info->extent_root, path);
393 btrfs_free_path(path);
394 finish_current_insert(trans, root->fs_info->extent_root);
395 del_pending_extents(trans, root->fs_info->extent_root);
396 return 0;
397}
398
399int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
400 struct btrfs_root *root)
401{
402 finish_current_insert(trans, root->fs_info->extent_root);
403 del_pending_extents(trans, root->fs_info->extent_root);
404 return 0;
405}
406
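/*
 * read the current reference count of the extent item for
 * [blocknr, blocknr + num_blocks) into refs
 */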
407static int lookup_extent_ref(struct btrfs_trans_handle *trans,
408 struct btrfs_root *root, u64 blocknr,
409 u64 num_blocks, u32 *refs)
410{
411 struct btrfs_path *path;
412 int ret;
413 struct btrfs_key key;
414 struct extent_buffer *l;
415 struct btrfs_extent_item *item;
416
417 path = btrfs_alloc_path();
418 key.objectid = blocknr;
419 key.offset = num_blocks;
420 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
421 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
422 0, 0);
423 if (ret < 0)
424 goto out;
425 if (ret != 0) {
426 btrfs_print_leaf(root, path->nodes[0]);
427 printk("failed to find block number %Lu\n", blocknr);
428 BUG();
429 }
430 l = path->nodes[0];
431 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
432 *refs = btrfs_extent_refs(l, item);
433out:
434 btrfs_free_path(path);
435 return 0;
436}
437
438int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
439 struct btrfs_root *root)
440{
441 return btrfs_inc_extent_ref(trans, root,
442 extent_buffer_blocknr(root->node), 1);
443}
444
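/*
 * take one reference on everything buf points to: the disk blocks of
 * its file extents when buf is a leaf, or its child blocks when it is
 * a node.  this is a no-op for roots without ref_cows set, and on
 * failure the references taken so far are dropped again.
 */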
445int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
446 struct extent_buffer *buf)
447{
448 u64 blocknr;
449 u32 nritems;
450 struct btrfs_key key;
451 struct btrfs_file_extent_item *fi;
452 int i;
453 int leaf;
454 int ret;
455 int faili;
456 int err;
457
458 if (!root->ref_cows)
459 return 0;
460
461 leaf = btrfs_is_leaf(buf);
462 nritems = btrfs_header_nritems(buf);
463 for (i = 0; i < nritems; i++) {
464 if (leaf) {
465 u64 disk_blocknr;
466 btrfs_item_key_to_cpu(buf, &key, i);
467 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
468 continue;
469 fi = btrfs_item_ptr(buf, i,
470 struct btrfs_file_extent_item);
471 if (btrfs_file_extent_type(buf, fi) ==
472 BTRFS_FILE_EXTENT_INLINE)
473 continue;
474 disk_blocknr = btrfs_file_extent_disk_blocknr(buf, fi);
475 if (disk_blocknr == 0)
476 continue;
477 ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
478 btrfs_file_extent_disk_num_blocks(buf, fi));
479 if (ret) {
480 faili = i;
481 goto fail;
482 }
483 } else {
484 blocknr = btrfs_node_blockptr(buf, i);
485 ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
486 if (ret) {
487 faili = i;
488 goto fail;
489 }
490 }
491 }
492 return 0;
493fail:
494 WARN_ON(1);
495 for (i = 0; i < faili; i++) {
496 if (leaf) {
497 u64 disk_blocknr;
498 btrfs_item_key_to_cpu(buf, &key, i);
499 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
500 continue;
501 fi = btrfs_item_ptr(buf, i,
502 struct btrfs_file_extent_item);
503 if (btrfs_file_extent_type(buf, fi) ==
504 BTRFS_FILE_EXTENT_INLINE)
505 continue;
506 disk_blocknr = btrfs_file_extent_disk_blocknr(buf, fi);
507 if (disk_blocknr == 0)
508 continue;
509 err = btrfs_free_extent(trans, root, disk_blocknr,
510 btrfs_file_extent_disk_num_blocks(buf,
511 fi), 0);
512 BUG_ON(err);
513 } else {
514 blocknr = btrfs_node_blockptr(buf, i);
515 err = btrfs_free_extent(trans, root, blocknr, 1, 0);
516 BUG_ON(err);
517 }
518 }
519 return ret;
520}
521
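/*
 * copy the in-memory block group item for cache back over its item in
 * the extent tree.  data groups also get their last_alloc hint reset
 * to first_free.
 */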
522static int write_one_cache_group(struct btrfs_trans_handle *trans,
523 struct btrfs_root *root,
524 struct btrfs_path *path,
525 struct btrfs_block_group_cache *cache)
526{
527 int ret;
528 int pending_ret;
529 struct btrfs_root *extent_root = root->fs_info->extent_root;
530 unsigned long bi;
531 struct extent_buffer *leaf;
532
533 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
534 if (ret < 0)
535 goto fail;
536 BUG_ON(ret);
537
538 leaf = path->nodes[0];
539 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
540 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
541 btrfs_mark_buffer_dirty(leaf);
542 btrfs_release_path(extent_root, path);
543fail:
544 finish_current_insert(trans, extent_root);
545 pending_ret = del_pending_extents(trans, extent_root);
546 if (ret)
547 return ret;
548 if (pending_ret)
549 return pending_ret;
550 if (cache->data)
551 cache->last_alloc = cache->first_free;
552 return 0;
553
554}
555
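/*
 * write out every block group in the radix that is tagged
 * BTRFS_BLOCK_GROUP_DIRTY, clearing the tag on success and leaving it
 * set when the write fails so a later pass can retry.
 */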
556static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
557 struct btrfs_root *root,
558 struct radix_tree_root *radix)
559{
560 struct btrfs_block_group_cache *cache[8];
561 int ret;
562 int err = 0;
563 int werr = 0;
564 int i;
565 struct btrfs_path *path;
566 unsigned long off = 0;
567
568 path = btrfs_alloc_path();
569 if (!path)
570 return -ENOMEM;
571
572 while(1) {
573 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
574 off, ARRAY_SIZE(cache),
575 BTRFS_BLOCK_GROUP_DIRTY);
576 if (!ret)
577 break;
578 for (i = 0; i < ret; i++) {
579 err = write_one_cache_group(trans, root,
580 path, cache[i]);
581 /*
582 * if we fail to write the cache group, we want
583 * to keep it marked dirty in hopes that a later
584 * write will work
585 */
586 if (err) {
587 werr = err;
588 off = cache[i]->key.objectid +
589 cache[i]->key.offset;
590 continue;
591 }
592
593 radix_tree_tag_clear(radix, cache[i]->key.objectid +
594 cache[i]->key.offset - 1,
595 BTRFS_BLOCK_GROUP_DIRTY);
596 }
597 }
598 btrfs_free_path(path);
599 return werr;
600}
601
602int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
603 struct btrfs_root *root)
604{
605 int ret;
606 int ret2;
607 ret = write_dirty_block_radix(trans, root,
608 &root->fs_info->block_group_radix);
609 ret2 = write_dirty_block_radix(trans, root,
610 &root->fs_info->block_group_data_radix);
611 if (ret)
612 return ret;
613 if (ret2)
614 return ret2;
615 return 0;
616}
617
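/*
 * adjust the used block count of every block group covering
 * [blocknr, blocknr + num).  allocations may flip a mostly empty group
 * between the data and metadata radixes to match the data flag, and
 * frees can re-tag a group as BTRFS_BLOCK_GROUP_AVAIL and (with
 * mark_free) return the blocks to the free extent map.
 */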
618static int update_block_group(struct btrfs_trans_handle *trans,
619 struct btrfs_root *root,
620 u64 blocknr, u64 num, int alloc, int mark_free,
621 int data)
622{
623 struct btrfs_block_group_cache *cache;
624 struct btrfs_fs_info *info = root->fs_info;
625 u64 total = num;
626 u64 old_val;
627 u64 block_in_group;
628 u64 i;
629 int ret;
630
631 while(total) {
632 cache = btrfs_lookup_block_group(info, blocknr);
633 if (!cache) {
634 return -1;
635 }
636 block_in_group = blocknr - cache->key.objectid;
637 WARN_ON(block_in_group > cache->key.offset);
638 radix_tree_tag_set(cache->radix, cache->key.objectid +
639 cache->key.offset - 1,
640 BTRFS_BLOCK_GROUP_DIRTY);
641
642 old_val = btrfs_block_group_used(&cache->item);
643 num = min(total, cache->key.offset - block_in_group);
644 if (alloc) {
645 if (blocknr > cache->last_alloc)
646 cache->last_alloc = blocknr;
647 if (!cache->data) {
648 for (i = 0; i < num; i++) {
649 clear_radix_bit(&info->extent_map_radix,
650 blocknr + i);
651 }
652 }
653 if (cache->data != data &&
654 old_val < (cache->key.offset >> 1)) {
655 cache->data = data;
656 radix_tree_delete(cache->radix,
657 cache->key.objectid +
658 cache->key.offset - 1);
659
660 if (data) {
661 cache->radix =
662 &info->block_group_data_radix;
663 cache->item.flags |=
664 BTRFS_BLOCK_GROUP_DATA;
665 } else {
666 cache->radix = &info->block_group_radix;
667 cache->item.flags &=
668 ~BTRFS_BLOCK_GROUP_DATA;
669 }
670 ret = radix_tree_insert(cache->radix,
671 cache->key.objectid +
672 cache->key.offset - 1,
673 (void *)cache);
674 }
675 old_val += num;
676 } else {
677 old_val -= num;
678 if (blocknr < cache->first_free)
679 cache->first_free = blocknr;
680 if (!cache->data && mark_free) {
681 for (i = 0; i < num; i++) {
682 set_radix_bit(&info->extent_map_radix,
683 blocknr + i);
684 }
685 }
686 if (old_val < (cache->key.offset >> 1) &&
687 old_val + num >= (cache->key.offset >> 1)) {
688 radix_tree_tag_set(cache->radix,
689 cache->key.objectid +
690 cache->key.offset - 1,
691 BTRFS_BLOCK_GROUP_AVAIL);
692 }
693 }
694 btrfs_set_block_group_used(&cache->item, old_val);
695 total -= num;
696 blocknr += num;
697 }
698 return 0;
699}
700
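/*
 * copy every bit currently set in the pinned radix into copy, and warn
 * if any deferred extent inserts are still pending.
 */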
701int btrfs_copy_pinned(struct btrfs_root *root, struct radix_tree_root *copy)
702{
703 unsigned long gang[8];
704 u64 last = 0;
705 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
706 int ret;
707 int i;
708
709 while(1) {
710 ret = find_first_radix_bit(pinned_radix, gang, last,
711 ARRAY_SIZE(gang));
712 if (!ret)
713 break;
714 for (i = 0; i < ret; i++) {
715 set_radix_bit(copy, gang[i]);
716 last = gang[i] + 1;
717 }
718 }
719 ret = find_first_radix_bit(&root->fs_info->extent_ins_radix, gang, 0,
720 ARRAY_SIZE(gang));
721 WARN_ON(ret);
722 return 0;
723}
724
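/*
 * for every block recorded in unpin_radix: clear it from the pinned
 * radix, drop the pinned count of its block group, pull the group's
 * last_alloc hint back and return metadata blocks to the free extent
 * map.
 */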
725int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
726 struct btrfs_root *root,
727 struct radix_tree_root *unpin_radix)
728{
729 unsigned long gang[8];
730 struct btrfs_block_group_cache *block_group;
731 u64 first = 0;
732 int ret;
733 int i;
734 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
735 struct radix_tree_root *extent_radix = &root->fs_info->extent_map_radix;
736
737 while(1) {
738 ret = find_first_radix_bit(unpin_radix, gang, 0,
739 ARRAY_SIZE(gang));
740 if (!ret)
741 break;
742 if (!first)
743 first = gang[0];
744 for (i = 0; i < ret; i++) {
745 clear_radix_bit(pinned_radix, gang[i]);
746 clear_radix_bit(unpin_radix, gang[i]);
747 block_group = btrfs_lookup_block_group(root->fs_info,
748 gang[i]);
749 if (block_group) {
750 WARN_ON(block_group->pinned == 0);
751 block_group->pinned--;
752 if (gang[i] < block_group->last_alloc)
753 block_group->last_alloc = gang[i];
754 if (!block_group->data)
755 set_radix_bit(extent_radix, gang[i]);
756 }
757 }
758 }
759 return 0;
760}
761
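/*
 * insert extent items for the blocks recorded in extent_ins_radix.
 * these are single block allocations made for the extent tree itself
 * (see btrfs_alloc_extent), where the item insert had to be deferred.
 */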
762static int finish_current_insert(struct btrfs_trans_handle *trans, struct
763 btrfs_root *extent_root)
764{
765 struct btrfs_key ins;
766 struct btrfs_extent_item extent_item;
767 int i;
768 int ret;
769 int err;
770 unsigned long gang[8];
771 struct btrfs_fs_info *info = extent_root->fs_info;
772
773 btrfs_set_stack_extent_refs(&extent_item, 1);
774 ins.offset = 1;
775 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
776 btrfs_set_stack_extent_owner(&extent_item,
777 extent_root->root_key.objectid);
778
779 while(1) {
780 ret = find_first_radix_bit(&info->extent_ins_radix, gang, 0,
781 ARRAY_SIZE(gang));
782 if (!ret)
783 break;
784
785 for (i = 0; i < ret; i++) {
786 ins.objectid = gang[i];
787 err = btrfs_insert_item(trans, extent_root, &ins,
788 &extent_item,
789 sizeof(extent_item));
790 clear_radix_bit(&info->extent_ins_radix, gang[i]);
791 WARN_ON(err);
792 }
793 }
794 return 0;
795}
796
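/*
 * record a freed block in the pinned radix and bump the owning block
 * group's pinned count, or just mark it in the pending delete radix
 * when pending is set.  blocks whose buffer was created by the running
 * transaction are not pinned at all, since no committed tree can still
 * reference them.
 */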
797static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
798{
799 int err;
800 struct extent_buffer *buf;
801
802 if (!pending) {
803 buf = btrfs_find_tree_block(root, blocknr);
804 if (buf) {
805 if (btrfs_buffer_uptodate(buf)) {
806 u64 transid =
807 root->fs_info->running_transaction->transid;
808 if (btrfs_header_generation(buf) == transid) {
809 free_extent_buffer(buf);
810 return 0;
811 }
812 }
813 free_extent_buffer(buf);
814 }
815 err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
816 if (!err) {
817 struct btrfs_block_group_cache *cache;
818 cache = btrfs_lookup_block_group(root->fs_info,
819 blocknr);
820 if (cache)
821 cache->pinned++;
822 }
823 } else {
824 err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
825 }
826 BUG_ON(err < 0);
827 return 0;
828}
829
830/*
831 * remove an extent from the root, returns 0 on success
832 */
833static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
834 *root, u64 blocknr, u64 num_blocks, int pin,
835 int mark_free)
836{
837 struct btrfs_path *path;
838 struct btrfs_key key;
839 struct btrfs_fs_info *info = root->fs_info;
840 struct btrfs_root *extent_root = info->extent_root;
841 struct extent_buffer *leaf;
842 int ret;
843 struct btrfs_extent_item *ei;
844 u32 refs;
845
846 key.objectid = blocknr;
847 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
848 key.offset = num_blocks;
849
850 path = btrfs_alloc_path();
851 if (!path)
852 return -ENOMEM;
853
854 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
855 if (ret < 0)
856 return ret;
857 BUG_ON(ret);
858
859 leaf = path->nodes[0];
860 ei = btrfs_item_ptr(leaf, path->slots[0],
861 struct btrfs_extent_item);
862 refs = btrfs_extent_refs(leaf, ei);
863 BUG_ON(refs == 0);
864 refs -= 1;
865 btrfs_set_extent_refs(leaf, ei, refs);
866 btrfs_mark_buffer_dirty(leaf);
867
868 if (refs == 0) {
869 u64 super_blocks_used, root_blocks_used;
870
871 if (pin) {
872 ret = pin_down_block(root, blocknr, 0);
873 BUG_ON(ret);
874 }
875
876 /* block accounting for super block */
877 super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
878 btrfs_set_super_blocks_used(&info->super_copy,
879 super_blocks_used - num_blocks);
880
881 /* block accounting for root item */
882 root_blocks_used = btrfs_root_used(&root->root_item);
883 btrfs_set_root_used(&root->root_item,
884 root_blocks_used - num_blocks);
885
886 ret = btrfs_del_item(trans, extent_root, path);
887 if (ret) {
888 return ret;
889 }
890 ret = update_block_group(trans, root, blocknr, num_blocks, 0,
891 mark_free, 0);
892 BUG_ON(ret);
893 }
894 btrfs_free_path(path);
895 finish_current_insert(trans, extent_root);
896 return ret;
897}
898
899/*
900 * find all the blocks marked as pending in the radix tree and remove
901 * them from the extent map
902 */
903static int del_pending_extents(struct btrfs_trans_handle *trans, struct
904 btrfs_root *extent_root)
905{
906 int ret;
907 int wret;
908 int err = 0;
909 unsigned long gang[4];
910 int i;
911 struct radix_tree_root *pending_radix;
912 struct radix_tree_root *pinned_radix;
913 struct btrfs_block_group_cache *cache;
914
915 pending_radix = &extent_root->fs_info->pending_del_radix;
916 pinned_radix = &extent_root->fs_info->pinned_radix;
917
918 while(1) {
919 ret = find_first_radix_bit(pending_radix, gang, 0,
920 ARRAY_SIZE(gang));
921 if (!ret)
922 break;
923 for (i = 0; i < ret; i++) {
924 wret = set_radix_bit(pinned_radix, gang[i]);
925 if (wret == 0) {
926 cache =
927 btrfs_lookup_block_group(extent_root->fs_info,
928 gang[i]);
929 if (cache)
930 cache->pinned++;
931 }
932 if (wret < 0) {
933 printk(KERN_CRIT "set_radix_bit, err %d\n",
934 wret);
935 BUG_ON(wret < 0);
936 }
937 wret = clear_radix_bit(pending_radix, gang[i]);
938 BUG_ON(wret);
939 wret = __free_extent(trans, extent_root,
940 gang[i], 1, 0, 0);
941 if (wret)
942 err = wret;
943 }
944 }
945 return err;
946}
947
948/*
949 * remove an extent from the root, returns 0 on success
950 */
951int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
952 *root, u64 blocknr, u64 num_blocks, int pin)
953{
954 struct btrfs_root *extent_root = root->fs_info->extent_root;
955 int pending_ret;
956 int ret;
957
958 if (root == extent_root) {
959 pin_down_block(root, blocknr, 1);
960 return 0;
961 }
962 ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
963 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
964 return ret ? ret : pending_ret;
965}
966
967/*
968 * walks the btree of allocated extents and finds a hole of a given size.
969 * The key ins is changed to record the hole:
970 * ins->objectid == block start
971 * ins->flags = BTRFS_EXTENT_ITEM_KEY
972 * ins->offset == number of blocks
973 * Any available blocks before search_start are skipped.
974 */
975static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
976 *orig_root, u64 num_blocks, u64 empty_size,
977 u64 search_start, u64 search_end, u64 hint_block,
978 struct btrfs_key *ins, u64 exclude_start,
979 u64 exclude_nr, int data)
980{
981 struct btrfs_path *path;
982 struct btrfs_key key;
983 int ret;
984 u64 hole_size = 0;
985 int slot = 0;
986 u64 last_block = 0;
987 u64 test_block;
988 u64 orig_search_start = search_start;
989 int start_found;
990 struct extent_buffer *l;
991 struct btrfs_root * root = orig_root->fs_info->extent_root;
992 struct btrfs_fs_info *info = root->fs_info;
993 int total_needed = num_blocks;
994 int level;
995 struct btrfs_block_group_cache *block_group;
996 int full_scan = 0;
997 int wrapped = 0;
998
999 WARN_ON(num_blocks < 1);
1000 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1001
1002 level = btrfs_header_level(root->node);
1003
1004 if (search_end == (u64)-1)
1005 search_end = btrfs_super_total_blocks(&info->super_copy);
1006 if (hint_block) {
1007 block_group = btrfs_lookup_block_group(info, hint_block);
1008 block_group = btrfs_find_block_group(root, block_group,
1009 hint_block, data, 1);
1010 } else {
1011 block_group = btrfs_find_block_group(root,
1012 trans->block_group, 0,
1013 data, 1);
1014 }
1015
1016 total_needed += empty_size;
1017 path = btrfs_alloc_path();
1018
1019check_failed:
1020 if (!block_group->data)
1021 search_start = find_search_start(root, &block_group,
1022 search_start, total_needed);
1023 else if (!full_scan)
1024 search_start = max(block_group->last_alloc, search_start);
1025
1026 btrfs_init_path(path);
1027 ins->objectid = search_start;
1028 ins->offset = 0;
1029 start_found = 0;
1030 path->reada = 2;
1031
1032 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1033 if (ret < 0)
1034 goto error;
1035
1036 if (path->slots[0] > 0) {
1037 path->slots[0]--;
1038 }
1039
1040 l = path->nodes[0];
1041 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1042
1043 /*
1044 * a rare case, go back one key if we hit a block group item
1045 * instead of an extent item
1046 */
1047 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
1048 key.objectid + key.offset >= search_start) {
1049 ins->objectid = key.objectid;
1050 ins->offset = key.offset - 1;
1051 btrfs_release_path(root, path);
1052 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1053 if (ret < 0)
1054 goto error;
1055
1056 if (path->slots[0] > 0) {
1057 path->slots[0]--;
1058 }
1059 }
1060
1061 while (1) {
1062 l = path->nodes[0];
1063 slot = path->slots[0];
1064 if (slot >= btrfs_header_nritems(l)) {
1065 ret = btrfs_next_leaf(root, path);
1066 if (ret == 0)
1067 continue;
1068 if (ret < 0)
1069 goto error;
1070 if (!start_found) {
1071 ins->objectid = search_start;
1072 ins->offset = search_end - search_start;
1073 start_found = 1;
1074 goto check_pending;
1075 }
1076 ins->objectid = last_block > search_start ?
1077 last_block : search_start;
1078 ins->offset = search_end - ins->objectid;
1079 goto check_pending;
1080 }
1081
1082 btrfs_item_key_to_cpu(l, &key, slot);
1083 if (key.objectid >= search_start && key.objectid > last_block &&
1084 start_found) {
1085 if (last_block < search_start)
1086 last_block = search_start;
1087 hole_size = key.objectid - last_block;
1088 if (hole_size >= num_blocks) {
1089 ins->objectid = last_block;
1090 ins->offset = hole_size;
1091 goto check_pending;
1092 }
1093 }
1094
1095 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1096 goto next;
1097
1098 start_found = 1;
1099 last_block = key.objectid + key.offset;
1100 if (!full_scan && last_block >= block_group->key.objectid +
1101 block_group->key.offset) {
1102 btrfs_release_path(root, path);
1103 search_start = block_group->key.objectid +
1104 block_group->key.offset * 2;
1105 goto new_group;
1106 }
1107next:
1108 path->slots[0]++;
1109 cond_resched();
1110 }
1111check_pending:
1112 /* we have to make sure we didn't find an extent that has already
1113 * been allocated by the map tree or the original allocation
1114 */
1115 btrfs_release_path(root, path);
1116 BUG_ON(ins->objectid < search_start);
1117
1118 if (ins->objectid + num_blocks >= search_end)
1119 goto enospc;
1120
1121 for (test_block = ins->objectid;
1122 test_block < ins->objectid + num_blocks; test_block++) {
1123 if (test_radix_bit(&info->pinned_radix, test_block) ||
1124 test_radix_bit(&info->extent_ins_radix, test_block)) {
1125 search_start = test_block + 1;
1126 goto new_group;
1127 }
1128 }
1129 if (exclude_nr > 0 && (ins->objectid + num_blocks > exclude_start &&
1130 ins->objectid < exclude_start + exclude_nr)) {
1131 search_start = exclude_start + exclude_nr;
1132 goto new_group;
1133 }
1134 if (!data) {
1135 block_group = btrfs_lookup_block_group(info, ins->objectid);
1136 if (block_group)
1137 trans->block_group = block_group;
1138 }
1139 ins->offset = num_blocks;
1140 btrfs_free_path(path);
1141 return 0;
1142
1143new_group:
1144 if (search_start + num_blocks >= search_end) {
1145enospc:
1146 search_start = orig_search_start;
1147 if (full_scan) {
1148 ret = -ENOSPC;
1149 goto error;
1150 }
1151 if (wrapped) {
1152 if (!full_scan)
1153 total_needed -= empty_size;
1154 full_scan = 1;
1155 } else
1156 wrapped = 1;
1157 }
1158 block_group = btrfs_lookup_block_group(info, search_start);
1159 cond_resched();
1160 if (!full_scan)
1161 block_group = btrfs_find_block_group(root, block_group,
1162 search_start, data, 0);
1163 goto check_failed;
1164
1165error:
1166 btrfs_release_path(root, path);
1167 btrfs_free_path(path);
1168 return ret;
1169}
1170/*
1171 * finds a free extent and does all the dirty work required for allocation
1172 * returns the key for the allocated extent through ins.
1174 *
1175 * returns 0 if everything worked, non-zero otherwise.
1176 */
1177int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1178 struct btrfs_root *root, u64 owner,
1179 u64 num_blocks, u64 empty_size, u64 hint_block,
1180 u64 search_end, struct btrfs_key *ins, int data)
1181{
1182 int ret;
1183 int pending_ret;
1184 u64 super_blocks_used, root_blocks_used;
1185 u64 search_start = 0;
1186 struct btrfs_fs_info *info = root->fs_info;
1187 struct btrfs_root *extent_root = info->extent_root;
1188 struct btrfs_extent_item extent_item;
1189
1190 btrfs_set_stack_extent_refs(&extent_item, 1);
1191 btrfs_set_stack_extent_owner(&extent_item, owner);
1192
1193 WARN_ON(num_blocks < 1);
1194 ret = find_free_extent(trans, root, num_blocks, empty_size,
1195 search_start, search_end, hint_block, ins,
1196 trans->alloc_exclude_start,
1197 trans->alloc_exclude_nr, data);
1198 BUG_ON(ret);
1199 if (ret)
1200 return ret;
1201
1202 /* block accounting for super block */
1203 super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
1204 btrfs_set_super_blocks_used(&info->super_copy, super_blocks_used +
1205 num_blocks);
1206
1207 /* block accounting for root item */
1208 root_blocks_used = btrfs_root_used(&root->root_item);
1209 btrfs_set_root_used(&root->root_item, root_blocks_used +
1210 num_blocks);
1211
1212 if (root == extent_root) {
1213 BUG_ON(num_blocks != 1);
1214 set_radix_bit(&root->fs_info->extent_ins_radix, ins->objectid);
1215 goto update_block;
1216 }
1217
1218 WARN_ON(trans->alloc_exclude_nr);
1219 trans->alloc_exclude_start = ins->objectid;
1220 trans->alloc_exclude_nr = ins->offset;
1221 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
1222 sizeof(extent_item));
1223
1224 trans->alloc_exclude_start = 0;
1225 trans->alloc_exclude_nr = 0;
1226
1227 BUG_ON(ret);
1228 finish_current_insert(trans, extent_root);
1229 pending_ret = del_pending_extents(trans, extent_root);
1230 if (ret) {
1231 return ret;
1232 }
1233 if (pending_ret) {
1234 return pending_ret;
1235 }
1236
1237update_block:
1238 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
1239 data);
1240 BUG_ON(ret);
1241 return 0;
1242}
1243
1244/*
1245 * helper function to allocate a block for a given tree
1246 * returns the tree buffer or NULL.
1247 */
1248struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1249 struct btrfs_root *root, u64 hint,
1250 u64 empty_size)
1251{
1252 struct btrfs_key ins;
1253 int ret;
1254 struct extent_buffer *buf;
1255
1256 ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
1257 1, empty_size, hint, (u64)-1, &ins, 0);
1258 if (ret) {
1259 BUG_ON(ret > 0);
1260 return ERR_PTR(ret);
1261 }
1262 buf = btrfs_find_create_tree_block(root, ins.objectid);
1263 if (!buf) {
1264 btrfs_free_extent(trans, root, ins.objectid, 1, 0);
1265 return ERR_PTR(-ENOMEM);
1266 }
1267 btrfs_set_buffer_uptodate(buf);
1268 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
1269 buf->start + buf->len - 1, GFP_NOFS);
1270 /*
1271 set_buffer_checked(buf);
1272 set_buffer_defrag(buf);
1273 */
1274 /* FIXME!!!!!!!!!!!!!!!!
1275 set_radix_bit(&trans->transaction->dirty_pages, buf->pages[0]->index);
1276 */
1277 trans->blocks_used++;
1278 return buf;
1279}
1280
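/*
 * drop one reference on every file extent a leaf points to, used when
 * the leaf itself is being freed during snapshot deletion
 */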
1281static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1282 struct btrfs_root *root, struct extent_buffer *leaf)
1283{
1284 struct btrfs_key key;
1285 struct btrfs_file_extent_item *fi;
1286 int i;
1287 int nritems;
1288 int ret;
1289
1290 BUG_ON(!btrfs_is_leaf(leaf));
1291 nritems = btrfs_header_nritems(leaf);
1292 for (i = 0; i < nritems; i++) {
1293 u64 disk_blocknr;
1294
1295 btrfs_item_key_to_cpu(leaf, &key, i);
1296 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1297 continue;
1298 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1299 if (btrfs_file_extent_type(leaf, fi) ==
1300 BTRFS_FILE_EXTENT_INLINE)
1301 continue;
1302 /*
1303 * FIXME make sure to insert a trans record that
1304 * repeats the snapshot del on crash
1305 */
1306 disk_blocknr = btrfs_file_extent_disk_blocknr(leaf, fi);
1307 if (disk_blocknr == 0)
1308 continue;
1309 ret = btrfs_free_extent(trans, root, disk_blocknr,
1310 btrfs_file_extent_disk_num_blocks(leaf, fi), 0);
1311 BUG_ON(ret);
1312 }
1313 return 0;
1314}
1315
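/*
 * issue readahead against the children of a node that walk_down_tree
 * is about to descend into.  only blocks with a reference count of one
 * are read, since those are the only ones the walk will actually enter.
 */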
1316static void reada_walk_down(struct btrfs_root *root,
1317 struct extent_buffer *node)
1318{
1319 int i;
1320 u32 nritems;
1321 u64 blocknr;
1322 int ret;
1323 u32 refs;
1324
1325 nritems = btrfs_header_nritems(node);
1326 for (i = 0; i < nritems; i++) {
1327 blocknr = btrfs_node_blockptr(node, i);
1328 ret = lookup_extent_ref(NULL, root, blocknr, 1, &refs);
1329 BUG_ON(ret);
1330 if (refs != 1)
1331 continue;
1332 mutex_unlock(&root->fs_info->fs_mutex);
1333 ret = readahead_tree_block(root, blocknr);
1334 cond_resched();
1335 mutex_lock(&root->fs_info->fs_mutex);
1336 if (ret)
1337 break;
1338 }
1339}
1340
1341/*
1342 * helper function for drop_snapshot, this walks down the tree dropping ref
1343 * counts as it goes.
1344 */
1345static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1346 *root, struct btrfs_path *path, int *level)
1347{
1348 struct extent_buffer *next;
1349 struct extent_buffer *cur;
1350 u64 blocknr;
1351 int ret;
1352 u32 refs;
1353
1354 WARN_ON(*level < 0);
1355 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1356 ret = lookup_extent_ref(trans, root,
1357 extent_buffer_blocknr(path->nodes[*level]),
1358 1, &refs);
1359 BUG_ON(ret);
1360 if (refs > 1)
1361 goto out;
1362
1363 /*
1364 * walk down to the last node level and free all the leaves
1365 */
1366 while(*level >= 0) {
1367 WARN_ON(*level < 0);
1368 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1369 cur = path->nodes[*level];
1370
1371 if (*level > 0 && path->slots[*level] == 0)
1372 reada_walk_down(root, cur);
1373
1374 if (btrfs_header_level(cur) != *level)
1375 WARN_ON(1);
1376
1377 if (path->slots[*level] >=
1378 btrfs_header_nritems(cur))
1379 break;
1380 if (*level == 0) {
1381 ret = drop_leaf_ref(trans, root, cur);
1382 BUG_ON(ret);
1383 break;
1384 }
1385 blocknr = btrfs_node_blockptr(cur, path->slots[*level]);
1386 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
1387 BUG_ON(ret);
1388 if (refs != 1) {
1389 path->slots[*level]++;
1390 ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
1391 BUG_ON(ret);
1392 continue;
1393 }
1394 next = btrfs_find_tree_block(root, blocknr);
1395 if (!next || !btrfs_buffer_uptodate(next)) {
1396 free_extent_buffer(next);
1397 mutex_unlock(&root->fs_info->fs_mutex);
1398 next = read_tree_block(root, blocknr);
1399 mutex_lock(&root->fs_info->fs_mutex);
1400
1401 /* we dropped the lock, check one more time */
1402 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
1403 BUG_ON(ret);
1404 if (refs != 1) {
1405 path->slots[*level]++;
1406 free_extent_buffer(next);
1407 ret = btrfs_free_extent(trans, root,
1408 blocknr, 1, 1);
1409 BUG_ON(ret);
1410 continue;
1411 }
1412 }
1413 WARN_ON(*level <= 0);
1414 if (path->nodes[*level-1])
1415 free_extent_buffer(path->nodes[*level-1]);
1416 path->nodes[*level-1] = next;
1417 *level = btrfs_header_level(next);
1418 path->slots[*level] = 0;
1419 }
1420out:
1421 WARN_ON(*level < 0);
1422 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1423 ret = btrfs_free_extent(trans, root,
1424 extent_buffer_blocknr(path->nodes[*level]), 1, 1);
1425 free_extent_buffer(path->nodes[*level]);
1426 path->nodes[*level] = NULL;
1427 *level += 1;
1428 BUG_ON(ret);
1429 return 0;
1430}
1431
1432/*
1433 * helper for dropping snapshots. This walks back up the tree in the path
1434 * to find the first node higher up where we haven't yet gone through
1435 * all the slots
1436 */
1437static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1438 *root, struct btrfs_path *path, int *level)
1439{
1440 int i;
1441 int slot;
1442 int ret;
1443 struct btrfs_root_item *root_item = &root->root_item;
1444
1445 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1446 slot = path->slots[i];
1447 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1448 struct extent_buffer *node;
1449 struct btrfs_disk_key disk_key;
1450 node = path->nodes[i];
1451 path->slots[i]++;
1452 *level = i;
1453 WARN_ON(*level == 0);
1454 btrfs_node_key(node, &disk_key, path->slots[i]);
1455 memcpy(&root_item->drop_progress,
1456 &disk_key, sizeof(disk_key));
1457 root_item->drop_level = i;
1458 return 0;
1459 } else {
1460 ret = btrfs_free_extent(trans, root,
1461 extent_buffer_blocknr(path->nodes[*level]),
1462 1, 1);
1463 BUG_ON(ret);
1464 free_extent_buffer(path->nodes[*level]);
1465 path->nodes[*level] = NULL;
1466 *level = i + 1;
1467 }
1468 }
1469 return 1;
1470}
1471
1472/*
1473 * drop the reference count on the tree rooted at 'snap'. This traverses
1474 * the tree freeing any blocks that have a ref count of zero after being
1475 * decremented.
1476 */
1477int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1478 *root)
1479{
1480 int ret = 0;
1481 int wret;
1482 int level;
1483 struct btrfs_path *path;
1484 int i;
1485 int orig_level;
1486 struct btrfs_root_item *root_item = &root->root_item;
1487
1488 path = btrfs_alloc_path();
1489 BUG_ON(!path);
1490
1491 level = btrfs_header_level(root->node);
1492 orig_level = level;
1493 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1494 path->nodes[level] = root->node;
1495 path->slots[level] = 0;
1496 } else {
1497 struct btrfs_key key;
1498 struct btrfs_disk_key found_key;
1499 struct extent_buffer *node;
1500
1501 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1502 level = root_item->drop_level;
1503 path->lowest_level = level;
1504 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1505 if (wret < 0) {
1506 ret = wret;
1507 goto out;
1508 }
1509 node = path->nodes[level];
1510 btrfs_node_key(node, &found_key, path->slots[level]);
1511 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
1512 sizeof(found_key)));
1513 }
1514 while(1) {
1515 wret = walk_down_tree(trans, root, path, &level);
1516 if (wret > 0)
1517 break;
1518 if (wret < 0)
1519 ret = wret;
1520
1521 wret = walk_up_tree(trans, root, path, &level);
1522 if (wret > 0)
1523 break;
1524 if (wret < 0)
1525 ret = wret;
1526 ret = -EAGAIN;
1527 extent_buffer_get(root->node);
1528 break;
1529 }
1530 for (i = 0; i <= orig_level; i++) {
1531 if (path->nodes[i]) {
1532 free_extent_buffer(path->nodes[i]);
1533 path->nodes[i] = NULL;
1534 }
1535 }
1536out:
1537 btrfs_free_path(path);
1538 return ret;
1539}
1540
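/*
 * remove every block group cache from the radix and free it
 */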
1541static int free_block_group_radix(struct radix_tree_root *radix)
1542{
1543 int ret;
1544 struct btrfs_block_group_cache *cache[8];
1545 int i;
1546
1547 while(1) {
1548 ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
1549 ARRAY_SIZE(cache));
1550 if (!ret)
1551 break;
1552 for (i = 0; i < ret; i++) {
1553 radix_tree_delete(radix, cache[i]->key.objectid +
1554 cache[i]->key.offset - 1);
1555 kfree(cache[i]);
1556 }
1557 }
1558 return 0;
1559}
1560
1561int btrfs_free_block_groups(struct btrfs_fs_info *info)
1562{
1563 int ret;
1564 int ret2;
1565 unsigned long gang[16];
1566 int i;
1567
1568 ret = free_block_group_radix(&info->block_group_radix);
1569 ret2 = free_block_group_radix(&info->block_group_data_radix);
1570 if (ret)
1571 return ret;
1572 if (ret2)
1573 return ret2;
1574
1575 while(1) {
1576 ret = find_first_radix_bit(&info->extent_map_radix,
1577 gang, 0, ARRAY_SIZE(gang));
1578 if (!ret)
1579 break;
1580 for (i = 0; i < ret; i++) {
1581 clear_radix_bit(&info->extent_map_radix, gang[i]);
1582 }
1583 }
1584 return 0;
1585}
1586
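/*
 * walk the block group items in the extent tree and build the
 * in-memory block group caches, placing each in the data or metadata
 * radix and tagging groups that are less than 80% full as
 * BTRFS_BLOCK_GROUP_AVAIL.
 */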
1587int btrfs_read_block_groups(struct btrfs_root *root)
1588{
1589 struct btrfs_path *path;
1590 int ret;
1591 int err = 0;
1592 struct btrfs_block_group_cache *cache;
1593 struct btrfs_fs_info *info = root->fs_info;
1594 struct radix_tree_root *radix;
1595 struct btrfs_key key;
1596 struct btrfs_key found_key;
1597 struct extent_buffer *leaf;
1598 u64 group_size_blocks;
1599 u64 used;
1600
1601 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE >>
1602 root->fs_info->sb->s_blocksize_bits;
1603 root = info->extent_root;
1604 key.objectid = 0;
1605 key.offset = group_size_blocks;
1606 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1607
1608 path = btrfs_alloc_path();
1609 if (!path)
1610 return -ENOMEM;
1611
1612 while(1) {
1613 ret = btrfs_search_slot(NULL, info->extent_root,
1614 &key, path, 0, 0);
1615 if (ret != 0) {
1616 err = ret;
1617 break;
1618 }
1619 leaf = path->nodes[0];
1620 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1621 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1622 if (!cache) {
1623 err = -ENOMEM;
1624 break;
1625 }
1626
1627 read_extent_buffer(leaf, &cache->item,
1628 btrfs_item_ptr_offset(leaf, path->slots[0]),
1629 sizeof(cache->item));
1630 if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
1631 radix = &info->block_group_data_radix;
1632 cache->data = 1;
1633 } else {
1634 radix = &info->block_group_radix;
1635 cache->data = 0;
1636 }
1637
1638 memcpy(&cache->key, &found_key, sizeof(found_key));
1639 cache->last_alloc = cache->key.objectid;
1640 cache->first_free = cache->key.objectid;
1641 cache->pinned = 0;
1642 cache->cached = 0;
1643
1644 cache->radix = radix;
1645
1646 key.objectid = found_key.objectid + found_key.offset;
1647 btrfs_release_path(root, path);
1648
1649 ret = radix_tree_insert(radix, found_key.objectid +
1650 found_key.offset - 1,
1651 (void *)cache);
1652 BUG_ON(ret);
1653 used = btrfs_block_group_used(&cache->item);
1654 if (used < div_factor(key.offset, 8)) {
1655 radix_tree_tag_set(radix, found_key.objectid +
1656 found_key.offset - 1,
1657 BTRFS_BLOCK_GROUP_AVAIL);
1658 }
1659 if (key.objectid >=
1660 btrfs_super_total_blocks(&info->super_copy))
1661 break;
1662 }
1663
1664 btrfs_free_path(path);
1665 return 0;
1666}