/*
 * Btrfs: Add simple stripe size parameter
 * fs/btrfs/extent-tree.c (from linux-2.6-block.git)
 */
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include "ctree.h"
21#include "disk-io.h"
22#include "print-tree.h"
23#include "transaction.h"
24
25#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
26#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
27#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
28
29static int finish_current_insert(struct btrfs_trans_handle *trans, struct
30 btrfs_root *extent_root);
31static int del_pending_extents(struct btrfs_trans_handle *trans, struct
32 btrfs_root *extent_root);
33
34static int cache_block_group(struct btrfs_root *root,
35 struct btrfs_block_group_cache *block_group)
36{
37 struct btrfs_path *path;
38 int ret;
39 struct btrfs_key key;
40 struct extent_buffer *leaf;
41 struct extent_map_tree *free_space_cache;
42 int slot;
43 u64 last = 0;
44 u64 hole_size;
45 u64 first_free;
46 int found = 0;
47
48 if (!block_group)
49 return 0;
50
51 root = root->fs_info->extent_root;
52 free_space_cache = &root->fs_info->free_space_cache;
53
54 if (block_group->cached)
55 return 0;
56
57 path = btrfs_alloc_path();
58 if (!path)
59 return -ENOMEM;
60
61 path->reada = 2;
62 first_free = block_group->key.objectid;
63 key.objectid = block_group->key.objectid;
64 key.offset = 0;
65
66 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
67 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
68
69 if (ret < 0)
70 return ret;
71
72 if (ret && path->slots[0] > 0)
73 path->slots[0]--;
74
75 while(1) {
76 leaf = path->nodes[0];
77 slot = path->slots[0];
78 if (slot >= btrfs_header_nritems(leaf)) {
79 ret = btrfs_next_leaf(root, path);
80 if (ret < 0)
81 goto err;
82 if (ret == 0) {
83 continue;
84 } else {
85 break;
86 }
87 }
88
89 btrfs_item_key_to_cpu(leaf, &key, slot);
90 if (key.objectid < block_group->key.objectid) {
91 if (key.objectid + key.offset > first_free)
92 first_free = key.objectid + key.offset;
93 goto next;
94 }
95
96 if (key.objectid >= block_group->key.objectid +
97 block_group->key.offset) {
98 break;
99 }
100
101 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
102 if (!found) {
103 last = first_free;
104 found = 1;
105 }
106 if (key.objectid > last) {
107 hole_size = key.objectid - last;
108 set_extent_dirty(free_space_cache, last,
109 last + hole_size - 1,
110 GFP_NOFS);
111 }
112 last = key.objectid + key.offset;
113 }
114next:
115 path->slots[0]++;
116 }
117
118 if (!found)
119 last = first_free;
120 if (block_group->key.objectid +
121 block_group->key.offset > last) {
122 hole_size = block_group->key.objectid +
123 block_group->key.offset - last;
124 set_extent_dirty(free_space_cache, last,
125 last + hole_size - 1, GFP_NOFS);
126 }
127 block_group->cached = 1;
128err:
129 btrfs_free_path(path);
130 return 0;
131}
132
133struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
134 btrfs_fs_info *info,
135 u64 bytenr)
136{
137 struct extent_map_tree *block_group_cache;
138 struct btrfs_block_group_cache *block_group = NULL;
139 u64 ptr;
140 u64 start;
141 u64 end;
142 int ret;
143
144 block_group_cache = &info->block_group_cache;
145 ret = find_first_extent_bit(block_group_cache,
146 bytenr, &start, &end,
147 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA);
148 if (ret) {
149 return NULL;
150 }
151 ret = get_state_private(block_group_cache, start, &ptr);
152 if (ret)
153 return NULL;
154
155 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
156 if (block_group->key.objectid <= bytenr && bytenr <
157 block_group->key.objectid + block_group->key.offset)
158 return block_group;
159 return NULL;
160}
/*
 * Scan the cached free-space map for a run of at least @num free bytes at
 * or after @search_start, preferring the block group in *cache_ret.  The
 * scan moves to later groups as needed and, once it runs off the end,
 * wraps back to @search_start accepting mixed groups.  On success returns
 * the start of the hole and updates *cache_ret to the group that was
 * used; when nothing suitable is found it returns @search_start.
 */
static u64 find_search_start(struct btrfs_root *root,
                             struct btrfs_block_group_cache **cache_ret,
                             u64 search_start, int num,
                             int data, int full_scan)
{
        int ret;
        struct btrfs_block_group_cache *cache = *cache_ret;
        u64 last;
        u64 start = 0;
        u64 end = 0;
        u64 cache_miss = 0;     /* first offset the free-space scan ran dry at */
        int wrapped = 0;

        if (!cache) {
                cache = btrfs_lookup_block_group(root->fs_info, search_start);
                if (!cache)
                        return search_start;
        }
again:
        ret = cache_block_group(root, cache);
        if (ret)
                goto out;

        last = max(search_start, cache->key.objectid);

        while(1) {
                ret = find_first_extent_bit(&root->fs_info->free_space_cache,
                                            last, &start, &end, EXTENT_DIRTY);
                if (ret) {
                        /* no more recorded free space past 'last' */
                        if (!cache_miss)
                                cache_miss = last;
                        goto new_group;
                }

                start = max(last, start);
                last = end + 1;
                if (last - start < num) {
                        /* hole too small; remember it if it ends exactly
                         * at the group boundary */
                        if (last == cache->key.objectid + cache->key.offset)
                                cache_miss = start;
                        continue;
                }
                /* unless mixed, the allocation must fit inside the group */
                if (data != BTRFS_BLOCK_GROUP_MIXED &&
                    start + num > cache->key.objectid + cache->key.offset)
                        goto new_group;
                return start;
        }
out:
        return search_start;

new_group:
        last = cache->key.objectid + cache->key.offset;
wrapped:
        cache = btrfs_lookup_block_group(root->fs_info, last);
        if (!cache) {
no_cache:
                /* ran off the end of the fs: wrap once back to the
                 * original start, accepting mixed groups this time */
                if (!wrapped) {
                        wrapped = 1;
                        last = search_start;
                        data = BTRFS_BLOCK_GROUP_MIXED;
                        goto wrapped;
                }
                return search_start;
        }
        if (cache_miss && !cache->cached) {
                /* an uncached group may have had space we skipped; cache
                 * it and retry from the miss point */
                cache_block_group(root, cache);
                last = cache_miss;
                cache = btrfs_lookup_block_group(root->fs_info, last);
        }
        if (!full_scan)
                cache = btrfs_find_block_group(root, cache, last, data, 0);
        if (!cache)
                goto no_cache;
        *cache_ret = cache;
        cache_miss = 0;
        goto again;
}
237
238static u64 div_factor(u64 num, int factor)
239{
240 if (factor == 10)
241 return num;
242 num *= factor;
243 do_div(num, 10);
244 return num;
245}
246
/*
 * Pick a block group to allocate from.  The group containing
 * @search_start (if any) and then @hint are tried first; a group is
 * accepted when its used+pinned bytes are below a fullness factor of its
 * size.  Failing that, all matching groups after @search_start are
 * scanned; then a full re-scan ignoring the fullness factor; then a
 * final pass accepting either data or metadata groups.  May return NULL.
 */
struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
                                                       struct btrfs_block_group_cache
                                                       *hint, u64 search_start,
                                                       int data, int owner)
{
        struct btrfs_block_group_cache *cache;
        struct extent_map_tree *block_group_cache;
        struct btrfs_block_group_cache *found_group = NULL;
        struct btrfs_fs_info *info = root->fs_info;
        u64 used;
        u64 last = 0;
        u64 hint_last;
        u64 start;
        u64 end;
        u64 free_check;
        u64 ptr;
        int bit;
        int ret;
        int full_search = 0;
        int factor = 8;         /* accept groups under 80% full by default */
        int data_swap = 0;

        block_group_cache = &info->block_group_cache;

        /* NOTE(review): redundant -- factor is already 8; kept as-is */
        if (!owner)
                factor = 8;

        if (data == BTRFS_BLOCK_GROUP_MIXED) {
                bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
                factor = 10;    /* mixed groups may be used until full */
        } else if (data)
                bit = BLOCK_GROUP_DATA;
        else
                bit = BLOCK_GROUP_METADATA;

        /* fast path: the group holding search_start itself */
        if (search_start) {
                struct btrfs_block_group_cache *shint;
                shint = btrfs_lookup_block_group(info, search_start);
                if (shint && (shint->data == data ||
                              shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
                        used = btrfs_block_group_used(&shint->item);
                        if (used + shint->pinned <
                            div_factor(shint->key.offset, factor)) {
                                return shint;
                        }
                }
        }
        /* next: the caller-supplied hint group */
        if (hint && (hint->data == data ||
                     hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
                used = btrfs_block_group_used(&hint->item);
                if (used + hint->pinned <
                    div_factor(hint->key.offset, factor)) {
                        return hint;
                }
                last = hint->key.objectid + hint->key.offset;
                hint_last = last;
        } else {
                if (hint)
                        hint_last = max(hint->key.objectid, search_start);
                else
                        hint_last = search_start;

                last = hint_last;
        }
again:
        while(1) {
                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, bit);
                if (ret)
                        break;

                ret = get_state_private(block_group_cache, start, &ptr);
                if (ret)
                        break;

                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                /* the full-search pass drops the fullness factor */
                if (full_search)
                        free_check = cache->key.offset;
                else
                        free_check = div_factor(cache->key.offset, factor);
                if (used + cache->pinned < free_check) {
                        found_group = cache;
                        goto found;
                }
                cond_resched();
        }
        if (!full_search) {
                /* widen: restart from search_start, no fullness limit */
                last = search_start;
                full_search = 1;
                goto again;
        }
        if (!data_swap) {
                /* last resort: accept groups of the other type too */
                data_swap = 1;
                bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
                last = search_start;
                goto again;
        }
found:
        return found_group;
}
350
351int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
352 struct btrfs_root *root,
353 u64 bytenr, u64 num_bytes)
354{
355 struct btrfs_path *path;
356 int ret;
357 struct btrfs_key key;
358 struct extent_buffer *l;
359 struct btrfs_extent_item *item;
360 u32 refs;
361
362 WARN_ON(num_bytes < root->sectorsize);
363 path = btrfs_alloc_path();
364 if (!path)
365 return -ENOMEM;
366
367 key.objectid = bytenr;
368 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
369 key.offset = num_bytes;
370 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
371 0, 1);
372 if (ret < 0)
373 return ret;
374 if (ret != 0) {
375 BUG();
376 }
377 BUG_ON(ret != 0);
378 l = path->nodes[0];
379 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
380 refs = btrfs_extent_refs(l, item);
381 btrfs_set_extent_refs(l, item, refs + 1);
382 btrfs_mark_buffer_dirty(path->nodes[0]);
383
384 btrfs_release_path(root->fs_info->extent_root, path);
385 btrfs_free_path(path);
386 finish_current_insert(trans, root->fs_info->extent_root);
387 del_pending_extents(trans, root->fs_info->extent_root);
388 return 0;
389}
390
391int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
392 struct btrfs_root *root)
393{
394 finish_current_insert(trans, root->fs_info->extent_root);
395 del_pending_extents(trans, root->fs_info->extent_root);
396 return 0;
397}
398
399static int lookup_extent_ref(struct btrfs_trans_handle *trans,
400 struct btrfs_root *root, u64 bytenr,
401 u64 num_bytes, u32 *refs)
402{
403 struct btrfs_path *path;
404 int ret;
405 struct btrfs_key key;
406 struct extent_buffer *l;
407 struct btrfs_extent_item *item;
408
409 WARN_ON(num_bytes < root->sectorsize);
410 path = btrfs_alloc_path();
411 key.objectid = bytenr;
412 key.offset = num_bytes;
413 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
414 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
415 0, 0);
416 if (ret < 0)
417 goto out;
418 if (ret != 0) {
419 btrfs_print_leaf(root, path->nodes[0]);
420 printk("failed to find block number %Lu\n", bytenr);
421 BUG();
422 }
423 l = path->nodes[0];
424 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
425 *refs = btrfs_extent_refs(l, item);
426out:
427 btrfs_free_path(path);
428 return 0;
429}
430
431int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
432 struct btrfs_root *root)
433{
434 return btrfs_inc_extent_ref(trans, root, root->node->start,
435 root->node->len);
436}
437
/*
 * Take a reference on every extent @buf points to: file extents when
 * @buf is a leaf, child tree blocks otherwise.  Only meaningful for
 * reference-counted (COW) roots.  If taking a reference fails partway,
 * the references taken so far are dropped again and the error from the
 * failing btrfs_inc_extent_ref call is returned.
 */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf)
{
        u64 bytenr;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret;
        int faili;      /* index of the item whose ref bump failed */
        int err;

        if (!root->ref_cows)
                return 0;

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);
        for (i = 0; i < nritems; i++) {
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        /* inline extents store data in the leaf itself;
                         * there is no separate extent to reference */
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        /* bytenr 0 marks a hole */
                        if (disk_bytenr == 0)
                                continue;
                        ret = btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf, fi));
                        if (ret) {
                                faili = i;
                                goto fail;
                        }
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        ret = btrfs_inc_extent_ref(trans, root, bytenr,
                                           btrfs_level_size(root, level - 1));
                        if (ret) {
                                faili = i;
                                goto fail;
                        }
                }
        }
        return 0;
fail:
        WARN_ON(1);
        /* roll back: drop the references taken before the failure */
        for (i =0; i < faili; i++) {
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;
                        err = btrfs_free_extent(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf,
                                                                     fi), 0);
                        BUG_ON(err);
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        err = btrfs_free_extent(trans, root, bytenr,
                                        btrfs_level_size(root, level - 1), 0);
                        BUG_ON(err);
                }
        }
        return ret;
}
516
/*
 * Write the in-memory block group item for @cache back over its on-disk
 * copy in the extent tree, then flush pending extent inserts/deletes.
 * Returns 0 on success, otherwise the search error or the
 * pending-delete error (in that priority).
 */
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
{
        int ret;
        int pending_ret;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        unsigned long bi;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
        if (ret < 0)
                goto fail;
        BUG_ON(ret);    /* the block group item must already exist */

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        /* copy the whole in-memory item over the on-disk bytes */
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(extent_root, path);
fail:
        /* NOTE(review): on search failure the path is not released here;
         * presumably the caller reuses or frees it -- verify */
        finish_current_insert(trans, extent_root);
        pending_ret = del_pending_extents(trans, extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;

}
548
549int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
550 struct btrfs_root *root)
551{
552 struct extent_map_tree *block_group_cache;
553 struct btrfs_block_group_cache *cache;
554 int ret;
555 int err = 0;
556 int werr = 0;
557 struct btrfs_path *path;
558 u64 last = 0;
559 u64 start;
560 u64 end;
561 u64 ptr;
562
563 block_group_cache = &root->fs_info->block_group_cache;
564 path = btrfs_alloc_path();
565 if (!path)
566 return -ENOMEM;
567
568 while(1) {
569 ret = find_first_extent_bit(block_group_cache, last,
570 &start, &end, BLOCK_GROUP_DIRTY);
571 if (ret)
572 break;
573
574 last = end + 1;
575 ret = get_state_private(block_group_cache, start, &ptr);
576 if (ret)
577 break;
578
579 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
580 err = write_one_cache_group(trans, root,
581 path, cache);
582 /*
583 * if we fail to write the cache group, we want
584 * to keep it marked dirty in hopes that a later
585 * write will work
586 */
587 if (err) {
588 werr = err;
589 continue;
590 }
591 clear_extent_bits(block_group_cache, start, end,
592 BLOCK_GROUP_DIRTY, GFP_NOFS);
593 }
594 btrfs_free_path(path);
595 return werr;
596}
597
/*
 * Account @num_bytes at @bytenr as allocated (@alloc != 0) or freed in
 * the block group items covering that range, possibly spanning several
 * groups.  Allocation may also retype a group: a mostly-empty group of
 * the wrong type is converted outright, otherwise it becomes MIXED.
 * When freeing with @mark_free set, the bytes are also returned to the
 * free-space cache.  Returns 0, or -1 if no group covers @bytenr.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free, int data)
{
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num_bytes;
        u64 old_val;
        u64 byte_in_group;
        u64 start;
        u64 end;

        while(total) {
                cache = btrfs_lookup_block_group(info, bytenr);
                if (!cache) {
                        return -1;
                }
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
                start = cache->key.objectid;
                end = start + cache->key.offset - 1;
                /* the group item changed; make sure it gets written out */
                set_extent_bits(&info->block_group_cache, start, end,
                                BLOCK_GROUP_DIRTY, GFP_NOFS);

                old_val = btrfs_block_group_used(&cache->item);
                /* clamp to what fits in this group; loop handles the rest */
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
                        if (cache->data != data &&
                            old_val < (cache->key.offset >> 1)) {
                                /* group is less than half full: flip it
                                 * to the requested type entirely */
                                int bit_to_clear;
                                int bit_to_set;
                                cache->data = data;
                                if (data) {
                                        bit_to_clear = BLOCK_GROUP_METADATA;
                                        bit_to_set = BLOCK_GROUP_DATA;
                                        cache->item.flags &=
                                                ~BTRFS_BLOCK_GROUP_MIXED;
                                        cache->item.flags |=
                                                BTRFS_BLOCK_GROUP_DATA;
                                } else {
                                        bit_to_clear = BLOCK_GROUP_DATA;
                                        bit_to_set = BLOCK_GROUP_METADATA;
                                        cache->item.flags &=
                                                ~BTRFS_BLOCK_GROUP_MIXED;
                                        cache->item.flags &=
                                                ~BTRFS_BLOCK_GROUP_DATA;
                                }
                                clear_extent_bits(&info->block_group_cache,
                                                  start, end, bit_to_clear,
                                                  GFP_NOFS);
                                set_extent_bits(&info->block_group_cache,
                                                start, end, bit_to_set,
                                                GFP_NOFS);
                        } else if (cache->data != data &&
                                   cache->data != BTRFS_BLOCK_GROUP_MIXED) {
                                /* too full to retype: mark it mixed so
                                 * both kinds may allocate from it */
                                cache->data = BTRFS_BLOCK_GROUP_MIXED;
                                set_extent_bits(&info->block_group_cache,
                                                start, end,
                                                BLOCK_GROUP_DATA |
                                                BLOCK_GROUP_METADATA,
                                                GFP_NOFS);
                        }
                        old_val += num_bytes;
                } else {
                        old_val -= num_bytes;
                        if (mark_free) {
                                set_extent_dirty(&info->free_space_cache,
                                                 bytenr, bytenr + num_bytes - 1,
                                                 GFP_NOFS);
                        }
                }
                btrfs_set_block_group_used(&cache->item, old_val);
                total -= num_bytes;
                bytenr += num_bytes;
        }
        return 0;
}
/*
 * Mark or clear [@bytenr, @bytenr + @num) in the pinned_extents map and
 * adjust the per-group and fs-wide pinned byte counters, walking across
 * block group boundaries as needed.  Always returns 0.
 */
static int update_pinned_extents(struct btrfs_root *root,
                                 u64 bytenr, u64 num, int pin)
{
        u64 len;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;

        if (pin) {
                set_extent_dirty(&fs_info->pinned_extents,
                                 bytenr, bytenr + num - 1, GFP_NOFS);
        } else {
                clear_extent_dirty(&fs_info->pinned_extents,
                                   bytenr, bytenr + num - 1, GFP_NOFS);
        }
        while (num > 0) {
                cache = btrfs_lookup_block_group(fs_info, bytenr);
                /* NOTE(review): WARN_ON only -- a NULL cache would still
                 * be dereferenced just below; presumably every pinned
                 * range lies inside some block group */
                WARN_ON(!cache);
                /* only count the part that falls inside this group */
                len = min(num, cache->key.offset -
                          (bytenr - cache->key.objectid));
                if (pin) {
                        cache->pinned += len;
                        fs_info->total_pinned += len;
                } else {
                        cache->pinned -= len;
                        fs_info->total_pinned -= len;
                }
                bytenr += len;
                num -= len;
        }
        return 0;
}
707
708int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
709{
710 u64 last = 0;
711 u64 start;
712 u64 end;
713 struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
714 int ret;
715
716 while(1) {
717 ret = find_first_extent_bit(pinned_extents, last,
718 &start, &end, EXTENT_DIRTY);
719 if (ret)
720 break;
721 set_extent_dirty(copy, start, end, GFP_NOFS);
722 last = end + 1;
723 }
724 return 0;
725}
726
727int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
728 struct btrfs_root *root,
729 struct extent_map_tree *unpin)
730{
731 u64 start;
732 u64 end;
733 int ret;
734 struct extent_map_tree *free_space_cache;
735 free_space_cache = &root->fs_info->free_space_cache;
736
737 while(1) {
738 ret = find_first_extent_bit(unpin, 0, &start, &end,
739 EXTENT_DIRTY);
740 if (ret)
741 break;
742 update_pinned_extents(root, start, end + 1 - start, 0);
743 clear_extent_dirty(unpin, start, end, GFP_NOFS);
744 set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
745 }
746 return 0;
747}
748
749static int finish_current_insert(struct btrfs_trans_handle *trans, struct
750 btrfs_root *extent_root)
751{
752 struct btrfs_key ins;
753 struct btrfs_extent_item extent_item;
754 int ret;
755 int err = 0;
756 u64 start;
757 u64 end;
758 struct btrfs_fs_info *info = extent_root->fs_info;
759
760 btrfs_set_stack_extent_refs(&extent_item, 1);
761 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
762 btrfs_set_stack_extent_owner(&extent_item,
763 extent_root->root_key.objectid);
764
765 while(1) {
766 ret = find_first_extent_bit(&info->extent_ins, 0, &start,
767 &end, EXTENT_LOCKED);
768 if (ret)
769 break;
770
771 ins.objectid = start;
772 ins.offset = end + 1 - start;
773 err = btrfs_insert_item(trans, extent_root, &ins,
774 &extent_item, sizeof(extent_item));
775 clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
776 GFP_NOFS);
777 }
778 return 0;
779}
780
781static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
782 int pending)
783{
784 int err = 0;
785 struct extent_buffer *buf;
786
787 if (!pending) {
788 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
789 if (buf) {
790 if (btrfs_buffer_uptodate(buf)) {
791 u64 transid =
792 root->fs_info->running_transaction->transid;
793 if (btrfs_header_generation(buf) == transid) {
794 free_extent_buffer(buf);
795 return 1;
796 }
797 }
798 free_extent_buffer(buf);
799 }
800 update_pinned_extents(root, bytenr, num_bytes, 1);
801 } else {
802 set_extent_bits(&root->fs_info->pending_del,
803 bytenr, bytenr + num_bytes - 1,
804 EXTENT_LOCKED, GFP_NOFS);
805 }
806 BUG_ON(err < 0);
807 return 0;
808}
809
810/*
811 * remove an extent from the root, returns 0 on success
812 */
813static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
814 *root, u64 bytenr, u64 num_bytes, int pin,
815 int mark_free)
816{
817 struct btrfs_path *path;
818 struct btrfs_key key;
819 struct btrfs_fs_info *info = root->fs_info;
820 struct btrfs_root *extent_root = info->extent_root;
821 struct extent_buffer *leaf;
822 int ret;
823 struct btrfs_extent_item *ei;
824 u32 refs;
825
826 key.objectid = bytenr;
827 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
828 key.offset = num_bytes;
829
830 path = btrfs_alloc_path();
831 if (!path)
832 return -ENOMEM;
833
834 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
835 if (ret < 0)
836 return ret;
837 BUG_ON(ret);
838
839 leaf = path->nodes[0];
840 ei = btrfs_item_ptr(leaf, path->slots[0],
841 struct btrfs_extent_item);
842 refs = btrfs_extent_refs(leaf, ei);
843 BUG_ON(refs == 0);
844 refs -= 1;
845 btrfs_set_extent_refs(leaf, ei, refs);
846 btrfs_mark_buffer_dirty(leaf);
847
848 if (refs == 0) {
849 u64 super_used;
850 u64 root_used;
851
852 if (pin) {
853 ret = pin_down_bytes(root, bytenr, num_bytes, 0);
854 if (ret > 0)
855 mark_free = 1;
856 BUG_ON(ret < 0);
857 }
858
859 /* block accounting for super block */
860 super_used = btrfs_super_bytes_used(&info->super_copy);
861 btrfs_set_super_bytes_used(&info->super_copy,
862 super_used - num_bytes);
863
864 /* block accounting for root item */
865 root_used = btrfs_root_used(&root->root_item);
866 btrfs_set_root_used(&root->root_item,
867 root_used - num_bytes);
868
869 ret = btrfs_del_item(trans, extent_root, path);
870 if (ret) {
871 return ret;
872 }
873 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
874 mark_free, 0);
875 BUG_ON(ret);
876 }
877 btrfs_free_path(path);
878 finish_current_insert(trans, extent_root);
879 return ret;
880}
881
882/*
883 * find all the blocks marked as pending in the radix tree and remove
884 * them from the extent map
885 */
/*
 * Process every range queued in the pending_del map: pin it, clear it
 * from the queue, then drop its extent item via __free_extent (without
 * re-pinning).  Returns 0 or the last __free_extent error.
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root)
{
        int ret;
        int err = 0;
        u64 start;
        u64 end;
        struct extent_map_tree *pending_del;
        struct extent_map_tree *pinned_extents;

        pending_del = &extent_root->fs_info->pending_del;
        pinned_extents = &extent_root->fs_info->pinned_extents;

        while(1) {
                ret = find_first_extent_bit(pending_del, 0, &start, &end,
                                            EXTENT_LOCKED);
                if (ret)
                        break;
                /* pin first so the range cannot be reallocated while the
                 * extent item is being removed */
                update_pinned_extents(extent_root, start, end + 1 - start, 1);
                clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);
                ret = __free_extent(trans, extent_root,
                                    start, end + 1 - start, 0, 0);
                if (ret)
                        err = ret;
        }
        return err;
}
914
915/*
916 * remove an extent from the root, returns 0 on success
917 */
918int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
919 *root, u64 bytenr, u64 num_bytes, int pin)
920{
921 struct btrfs_root *extent_root = root->fs_info->extent_root;
922 int pending_ret;
923 int ret;
924
925 WARN_ON(num_bytes < root->sectorsize);
926 if (root == extent_root) {
927 pin_down_bytes(root, bytenr, num_bytes, 1);
928 return 0;
929 }
930 ret = __free_extent(trans, root, bytenr, num_bytes, pin, pin == 0);
931 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
932 return ret ? ret : pending_ret;
933}
934
935static u64 stripe_align(struct btrfs_root *root, u64 val)
936{
937 u64 mask = ((u64)root->stripesize - 1);
938 u64 ret = (val + mask) & ~mask;
939 return ret;
940}
941
942/*
943 * walks the btree of allocated extents and find a hole of a given size.
944 * The key ins is changed to record the hole:
945 * ins->objectid == block start
946 * ins->flags = BTRFS_EXTENT_ITEM_KEY
947 * ins->offset == number of blocks
948 * Any available blocks before search_start are skipped.
949 */
static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                            *orig_root, u64 num_bytes, u64 empty_size,
                            u64 search_start, u64 search_end, u64 hint_byte,
                            struct btrfs_key *ins, u64 exclude_start,
                            u64 exclude_nr, int data)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 hole_size = 0;
        u64 aligned;
        int ret;
        int slot = 0;
        u64 last_byte = 0;      /* end of the last allocated extent seen */
        u64 orig_search_start = search_start;
        int start_found;
        struct extent_buffer *l;
        struct btrfs_root * root = orig_root->fs_info->extent_root;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total_needed = num_bytes;
        int level;
        struct btrfs_block_group_cache *block_group;
        int full_scan = 0;      /* final pass: ignore group preferences */
        int wrapped = 0;        /* already wrapped back to the fs start */
        u64 cached_start;

        WARN_ON(num_bytes < root->sectorsize);
        btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

        level = btrfs_header_level(root->node);

        /* very large hinted allocations may use groups of either type */
        if (num_bytes >= 32 * 1024 * 1024 && hint_byte) {
                data = BTRFS_BLOCK_GROUP_MIXED;
        }

        if (search_end == (u64)-1)
                search_end = btrfs_super_total_bytes(&info->super_copy);
        /* pick a starting block group from the hint or the transaction */
        if (hint_byte) {
                block_group = btrfs_lookup_block_group(info, hint_byte);
                block_group = btrfs_find_block_group(root, block_group,
                                                     hint_byte, data, 1);
        } else {
                block_group = btrfs_find_block_group(root,
                                                     trans->block_group, 0,
                                                     data, 1);
        }

        total_needed += empty_size;
        path = btrfs_alloc_path();
check_failed:
        /* consult the free-space cache first, then verify against the
         * extent tree below */
        search_start = find_search_start(root, &block_group, search_start,
                                         total_needed, data, full_scan);
        search_start = stripe_align(root, search_start);
        cached_start = search_start;
        btrfs_init_path(path);
        ins->objectid = search_start;
        ins->offset = 0;
        start_found = 0;
        path->reada = 2;

        ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
        if (ret < 0)
                goto error;

        if (path->slots[0] > 0) {
                path->slots[0]--;
        }

        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);

        /*
         * a rare case, go back one key if we hit a block group item
         * instead of an extent item
         */
        if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
            key.objectid + key.offset >= search_start) {
                ins->objectid = key.objectid;
                ins->offset = key.offset - 1;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
                if (ret < 0)
                        goto error;

                if (path->slots[0] > 0) {
                        path->slots[0]--;
                }
        }

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;

                        /* past the last leaf: everything up to search_end
                         * is a candidate hole */
                        search_start = max(search_start,
                                           block_group->key.objectid);
                        if (!start_found) {
                                aligned = stripe_align(root, search_start);
                                ins->objectid = aligned;
                                if (aligned >= search_end) {
                                        ret = -ENOSPC;
                                        goto error;
                                }
                                ins->offset = search_end - aligned;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = stripe_align(root,
                                                     last_byte > search_start ?
                                                     last_byte : search_start);
                        if (search_end <= ins->objectid) {
                                ret = -ENOSPC;
                                goto error;
                        }
                        ins->offset = search_end - ins->objectid;
                        BUG_ON(ins->objectid >= search_end);
                        goto check_pending;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                /* gap between last_byte and this key may be a usable hole */
                if (key.objectid >= search_start && key.objectid > last_byte &&
                    start_found) {
                        if (last_byte < search_start)
                                last_byte = search_start;
                        aligned = stripe_align(root, last_byte);
                        hole_size = key.objectid - aligned;
                        if (key.objectid > aligned && hole_size >= num_bytes) {
                                ins->objectid = aligned;
                                ins->offset = hole_size;
                                goto check_pending;
                        }
                }
                if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
                        if (!start_found) {
                                last_byte = key.objectid;
                                start_found = 1;
                        }
                        goto next;
                }


                start_found = 1;
                last_byte = key.objectid + key.offset;

                /* walked past the preferred group: move to the next one */
                if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
                    last_byte >= block_group->key.objectid +
                    block_group->key.offset) {
                        btrfs_release_path(root, path);
                        search_start = block_group->key.objectid +
                                block_group->key.offset;
                        goto new_group;
                }
next:
                path->slots[0]++;
                cond_resched();
        }
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, path);
        BUG_ON(ins->objectid < search_start);

        if (ins->objectid + num_bytes >= search_end)
                goto enospc;
        if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
            ins->objectid + num_bytes > block_group->
            key.objectid + block_group->key.offset) {
                search_start = block_group->key.objectid +
                        block_group->key.offset;
                goto new_group;
        }
        /* reject ranges with queued-but-uninserted extent items */
        if (test_range_bit(&info->extent_ins, ins->objectid,
                           ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
                search_start = ins->objectid + num_bytes;
                goto new_group;
        }
        /* reject pinned ranges still owned by the previous transaction */
        if (test_range_bit(&info->pinned_extents, ins->objectid,
                           ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
                search_start = ins->objectid + num_bytes;
                goto new_group;
        }
        /* respect the caller's exclusion window */
        if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
            ins->objectid < exclude_start + exclude_nr)) {
                search_start = exclude_start + exclude_nr;
                goto new_group;
        }
        if (!data) {
                block_group = btrfs_lookup_block_group(info, ins->objectid);
                if (block_group)
                        trans->block_group = block_group;
        }
        ins->offset = num_bytes;
        btrfs_free_path(path);
        return 0;

new_group:
        if (search_start + num_bytes >= search_end) {
enospc:
                /* escalate: wrap to the start first, then full scan,
                 * then finally give up with -ENOSPC */
                search_start = orig_search_start;
                if (full_scan) {
                        ret = -ENOSPC;
                        goto error;
                }
                if (wrapped) {
                        if (!full_scan)
                                total_needed -= empty_size;
                        full_scan = 1;
                } else
                        wrapped = 1;
        }
        block_group = btrfs_lookup_block_group(info, search_start);
        cond_resched();
        if (!full_scan)
                block_group = btrfs_find_block_group(root, block_group,
                                                     search_start, data, 0);
        goto check_failed;

error:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        return ret;
}
1177/*
1178 * finds a free extent and does all the dirty work required for allocation
1179 * returns the key for the extent through ins, and a tree buffer for
1180 * the first block of the extent through buf.
1181 *
1182 * returns 0 if everything worked, non-zero otherwise.
1183 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, u64 owner,
		       u64 num_bytes, u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, int data)
{
	int ret;
	int pending_ret;
	u64 super_used, root_used;
	u64 search_start = 0;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item extent_item;

	/* a freshly allocated extent starts life with a single reference */
	btrfs_set_stack_extent_refs(&extent_item, 1);
	btrfs_set_stack_extent_owner(&extent_item, owner);

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);
	/* NOTE(review): with CONFIG_BUG enabled the BUG_ON above makes the
	 * return below unreachable; both kept to preserve behavior */
	BUG_ON(ret);
	if (ret)
		return ret;

	/* block accounting for super block */
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);

	/* the range is in use now; pull it out of the free space cache */
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);

	if (root == extent_root) {
		/* allocating for the extent tree itself: record the range
		 * in extent_ins and defer the item insert to
		 * finish_current_insert() -- presumably to avoid recursing
		 * into the extent tree mid-allocation (confirm) */
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		WARN_ON(data == 1);
		goto update_block;
	}

	/* keep the new range excluded from allocation while its extent
	 * item is inserted, then clear the exclude window again */
	WARN_ON(trans->alloc_exclude_nr);
	trans->alloc_exclude_start = ins->objectid;
	trans->alloc_exclude_nr = ins->offset;
	ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
				sizeof(extent_item));

	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;

	BUG_ON(ret);
	/* flush any inserts/deletes that were deferred while we held
	 * the extent tree busy */
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

	if (ret) {
		return ret;
	}
	if (pending_ret) {
		return pending_ret;
	}

update_block:
	/* account the new allocation to its block group */
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
				 data);
	BUG_ON(ret);
	return 0;
}
1255
1256/*
1257 * helper function to allocate a block for a given tree
1258 * returns the tree buffer or NULL.
1259 */
1260struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1261 struct btrfs_root *root,
1262 u32 blocksize, u64 hint,
1263 u64 empty_size)
1264{
1265 struct btrfs_key ins;
1266 int ret;
1267 struct extent_buffer *buf;
1268
1269 ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
1270 blocksize, empty_size, hint,
1271 (u64)-1, &ins, 0);
1272 if (ret) {
1273 BUG_ON(ret > 0);
1274 return ERR_PTR(ret);
1275 }
1276 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
1277 if (!buf) {
1278 btrfs_free_extent(trans, root, ins.objectid, blocksize, 0);
1279 return ERR_PTR(-ENOMEM);
1280 }
1281 btrfs_set_buffer_uptodate(buf);
1282 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
1283 buf->start + buf->len - 1, GFP_NOFS);
1284 set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
1285 buf->start, buf->start + buf->len - 1,
1286 EXTENT_CSUM, GFP_NOFS);
1287 buf->flags |= EXTENT_CSUM;
1288 btrfs_set_buffer_defrag(buf);
1289 trans->blocks_used++;
1290 return buf;
1291}
1292
1293static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1294 struct btrfs_root *root, struct extent_buffer *leaf)
1295{
1296 struct btrfs_key key;
1297 struct btrfs_file_extent_item *fi;
1298 int i;
1299 int nritems;
1300 int ret;
1301
1302 BUG_ON(!btrfs_is_leaf(leaf));
1303 nritems = btrfs_header_nritems(leaf);
1304 for (i = 0; i < nritems; i++) {
1305 u64 disk_bytenr;
1306
1307 btrfs_item_key_to_cpu(leaf, &key, i);
1308 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1309 continue;
1310 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1311 if (btrfs_file_extent_type(leaf, fi) ==
1312 BTRFS_FILE_EXTENT_INLINE)
1313 continue;
1314 /*
1315 * FIXME make sure to insert a trans record that
1316 * repeats the snapshot del on crash
1317 */
1318 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1319 if (disk_bytenr == 0)
1320 continue;
1321 ret = btrfs_free_extent(trans, root, disk_bytenr,
1322 btrfs_file_extent_disk_num_bytes(leaf, fi), 0);
1323 BUG_ON(ret);
1324 }
1325 return 0;
1326}
1327
1328static void reada_walk_down(struct btrfs_root *root,
1329 struct extent_buffer *node)
1330{
1331 int i;
1332 u32 nritems;
1333 u64 bytenr;
1334 int ret;
1335 u32 refs;
1336 int level;
1337 u32 blocksize;
1338
1339 nritems = btrfs_header_nritems(node);
1340 level = btrfs_header_level(node);
1341 for (i = 0; i < nritems; i++) {
1342 bytenr = btrfs_node_blockptr(node, i);
1343 blocksize = btrfs_level_size(root, level - 1);
1344 ret = lookup_extent_ref(NULL, root, bytenr, blocksize, &refs);
1345 BUG_ON(ret);
1346 if (refs != 1)
1347 continue;
1348 mutex_unlock(&root->fs_info->fs_mutex);
1349 ret = readahead_tree_block(root, bytenr, blocksize);
1350 cond_resched();
1351 mutex_lock(&root->fs_info->fs_mutex);
1352 if (ret)
1353 break;
1354 }
1355}
1356
1357/*
1358 * helper function for drop_snapshot, this walks down the tree dropping ref
1359 * counts as it goes.
1360 */
1361static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1362 *root, struct btrfs_path *path, int *level)
1363{
1364 struct extent_buffer *next;
1365 struct extent_buffer *cur;
1366 u64 bytenr;
1367 u32 blocksize;
1368 int ret;
1369 u32 refs;
1370
1371 WARN_ON(*level < 0);
1372 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1373 ret = lookup_extent_ref(trans, root,
1374 path->nodes[*level]->start,
1375 path->nodes[*level]->len, &refs);
1376 BUG_ON(ret);
1377 if (refs > 1)
1378 goto out;
1379
1380 /*
1381 * walk down to the last node level and free all the leaves
1382 */
1383 while(*level >= 0) {
1384 WARN_ON(*level < 0);
1385 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1386 cur = path->nodes[*level];
1387
1388 if (*level > 0 && path->slots[*level] == 0)
1389 reada_walk_down(root, cur);
1390
1391 if (btrfs_header_level(cur) != *level)
1392 WARN_ON(1);
1393
1394 if (path->slots[*level] >=
1395 btrfs_header_nritems(cur))
1396 break;
1397 if (*level == 0) {
1398 ret = drop_leaf_ref(trans, root, cur);
1399 BUG_ON(ret);
1400 break;
1401 }
1402 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1403 blocksize = btrfs_level_size(root, *level - 1);
1404 ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs);
1405 BUG_ON(ret);
1406 if (refs != 1) {
1407 path->slots[*level]++;
1408 ret = btrfs_free_extent(trans, root, bytenr,
1409 blocksize, 1);
1410 BUG_ON(ret);
1411 continue;
1412 }
1413 next = btrfs_find_tree_block(root, bytenr, blocksize);
1414 if (!next || !btrfs_buffer_uptodate(next)) {
1415 free_extent_buffer(next);
1416 mutex_unlock(&root->fs_info->fs_mutex);
1417 next = read_tree_block(root, bytenr, blocksize);
1418 mutex_lock(&root->fs_info->fs_mutex);
1419
1420 /* we dropped the lock, check one more time */
1421 ret = lookup_extent_ref(trans, root, bytenr,
1422 blocksize, &refs);
1423 BUG_ON(ret);
1424 if (refs != 1) {
1425 path->slots[*level]++;
1426 free_extent_buffer(next);
1427 ret = btrfs_free_extent(trans, root,
1428 bytenr, blocksize, 1);
1429 BUG_ON(ret);
1430 continue;
1431 }
1432 }
1433 WARN_ON(*level <= 0);
1434 if (path->nodes[*level-1])
1435 free_extent_buffer(path->nodes[*level-1]);
1436 path->nodes[*level-1] = next;
1437 *level = btrfs_header_level(next);
1438 path->slots[*level] = 0;
1439 }
1440out:
1441 WARN_ON(*level < 0);
1442 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1443 ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
1444 path->nodes[*level]->len, 1);
1445 free_extent_buffer(path->nodes[*level]);
1446 path->nodes[*level] = NULL;
1447 *level += 1;
1448 BUG_ON(ret);
1449 return 0;
1450}
1451
1452/*
1453 * helper for dropping snapshots. This walks back up the tree in the path
1454 * to find the first node higher up where we haven't yet gone through
1455 * all the slots
1456 */
1457static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1458 *root, struct btrfs_path *path, int *level)
1459{
1460 int i;
1461 int slot;
1462 int ret;
1463 struct btrfs_root_item *root_item = &root->root_item;
1464
1465 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1466 slot = path->slots[i];
1467 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1468 struct extent_buffer *node;
1469 struct btrfs_disk_key disk_key;
1470 node = path->nodes[i];
1471 path->slots[i]++;
1472 *level = i;
1473 WARN_ON(*level == 0);
1474 btrfs_node_key(node, &disk_key, path->slots[i]);
1475 memcpy(&root_item->drop_progress,
1476 &disk_key, sizeof(disk_key));
1477 root_item->drop_level = i;
1478 return 0;
1479 } else {
1480 ret = btrfs_free_extent(trans, root,
1481 path->nodes[*level]->start,
1482 path->nodes[*level]->len, 1);
1483 BUG_ON(ret);
1484 free_extent_buffer(path->nodes[*level]);
1485 path->nodes[*level] = NULL;
1486 *level = i + 1;
1487 }
1488 }
1489 return 1;
1490}
1491
1492/*
1493 * drop the reference count on the tree rooted at 'snap'. This traverses
1494 * the tree freeing any blocks that have a ref count of zero after being
1495 * decremented.
1496 */
1497int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1498 *root)
1499{
1500 int ret = 0;
1501 int wret;
1502 int level;
1503 struct btrfs_path *path;
1504 int i;
1505 int orig_level;
1506 struct btrfs_root_item *root_item = &root->root_item;
1507
1508 path = btrfs_alloc_path();
1509 BUG_ON(!path);
1510
1511 level = btrfs_header_level(root->node);
1512 orig_level = level;
1513 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1514 path->nodes[level] = root->node;
1515 extent_buffer_get(root->node);
1516 path->slots[level] = 0;
1517 } else {
1518 struct btrfs_key key;
1519 struct btrfs_disk_key found_key;
1520 struct extent_buffer *node;
1521
1522 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1523 level = root_item->drop_level;
1524 path->lowest_level = level;
1525 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1526 if (wret < 0) {
1527 ret = wret;
1528 goto out;
1529 }
1530 node = path->nodes[level];
1531 btrfs_node_key(node, &found_key, path->slots[level]);
1532 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
1533 sizeof(found_key)));
1534 }
1535 while(1) {
1536 wret = walk_down_tree(trans, root, path, &level);
1537 if (wret > 0)
1538 break;
1539 if (wret < 0)
1540 ret = wret;
1541
1542 wret = walk_up_tree(trans, root, path, &level);
1543 if (wret > 0)
1544 break;
1545 if (wret < 0)
1546 ret = wret;
1547 ret = -EAGAIN;
1548 break;
1549 }
1550 for (i = 0; i <= orig_level; i++) {
1551 if (path->nodes[i]) {
1552 free_extent_buffer(path->nodes[i]);
1553 path->nodes[i] = NULL;
1554 }
1555 }
1556out:
1557 btrfs_free_path(path);
1558 return ret;
1559}
1560
1561int btrfs_free_block_groups(struct btrfs_fs_info *info)
1562{
1563 u64 start;
1564 u64 end;
1565 u64 ptr;
1566 int ret;
1567 while(1) {
1568 ret = find_first_extent_bit(&info->block_group_cache, 0,
1569 &start, &end, (unsigned int)-1);
1570 if (ret)
1571 break;
1572 ret = get_state_private(&info->block_group_cache, start, &ptr);
1573 if (!ret)
1574 kfree((void *)(unsigned long)ptr);
1575 clear_extent_bits(&info->block_group_cache, start,
1576 end, (unsigned int)-1, GFP_NOFS);
1577 }
1578 while(1) {
1579 ret = find_first_extent_bit(&info->free_space_cache, 0,
1580 &start, &end, EXTENT_DIRTY);
1581 if (ret)
1582 break;
1583 clear_extent_dirty(&info->free_space_cache, start,
1584 end, GFP_NOFS);
1585 }
1586 return 0;
1587}
1588
1589int btrfs_read_block_groups(struct btrfs_root *root)
1590{
1591 struct btrfs_path *path;
1592 int ret;
1593 int err = 0;
1594 int bit;
1595 struct btrfs_block_group_cache *cache;
1596 struct btrfs_fs_info *info = root->fs_info;
1597 struct extent_map_tree *block_group_cache;
1598 struct btrfs_key key;
1599 struct btrfs_key found_key;
1600 struct extent_buffer *leaf;
1601
1602 block_group_cache = &info->block_group_cache;
1603
1604 root = info->extent_root;
1605 key.objectid = 0;
1606 key.offset = BTRFS_BLOCK_GROUP_SIZE;
1607 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1608
1609 path = btrfs_alloc_path();
1610 if (!path)
1611 return -ENOMEM;
1612
1613 while(1) {
1614 ret = btrfs_search_slot(NULL, info->extent_root,
1615 &key, path, 0, 0);
1616 if (ret != 0) {
1617 err = ret;
1618 break;
1619 }
1620 leaf = path->nodes[0];
1621 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1622 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1623 if (!cache) {
1624 err = -1;
1625 break;
1626 }
1627
1628 read_extent_buffer(leaf, &cache->item,
1629 btrfs_item_ptr_offset(leaf, path->slots[0]),
1630 sizeof(cache->item));
1631 memcpy(&cache->key, &found_key, sizeof(found_key));
1632 cache->cached = 0;
1633 cache->pinned = 0;
1634 key.objectid = found_key.objectid + found_key.offset;
1635 btrfs_release_path(root, path);
1636
1637 if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
1638 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
1639 cache->data = BTRFS_BLOCK_GROUP_MIXED;
1640 } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
1641 bit = BLOCK_GROUP_DATA;
1642 cache->data = BTRFS_BLOCK_GROUP_DATA;
1643 } else {
1644 bit = BLOCK_GROUP_METADATA;
1645 cache->data = 0;
1646 }
1647
1648 /* use EXTENT_LOCKED to prevent merging */
1649 set_extent_bits(block_group_cache, found_key.objectid,
1650 found_key.objectid + found_key.offset - 1,
1651 bit | EXTENT_LOCKED, GFP_NOFS);
1652 set_state_private(block_group_cache, found_key.objectid,
1653 (unsigned long)cache);
1654
1655 if (key.objectid >=
1656 btrfs_super_total_bytes(&info->super_copy))
1657 break;
1658 }
1659
1660 btrfs_free_path(path);
1661 return 0;
1662}