Btrfs: Add back pointers from extents to the btree or file referencing them
[linux-2.6-block.git] / fs / btrfs / extent-tree.c
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/sched.h>
20#include <linux/crc32c.h>
21#include "hash.h"
22#include "ctree.h"
23#include "disk-io.h"
24#include "print-tree.h"
25#include "transaction.h"
26
27#define BLOCK_GROUP_DATA EXTENT_WRITEBACK
28#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
29#define BLOCK_GROUP_DIRTY EXTENT_DIRTY
30
31static int finish_current_insert(struct btrfs_trans_handle *trans, struct
32 btrfs_root *extent_root);
33static int del_pending_extents(struct btrfs_trans_handle *trans, struct
34 btrfs_root *extent_root);
35
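/*
 * Walk the extent items that fall inside a block group and record the
 * gaps between them as dirty ranges in fs_info->free_space_cache so the
 * allocator can hand that space out.  The scan runs once per block
 * group and is remembered via block_group->cached.
 */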
36static int cache_block_group(struct btrfs_root *root,
37 struct btrfs_block_group_cache *block_group)
38{
39 struct btrfs_path *path;
40 int ret;
41 struct btrfs_key key;
42 struct extent_buffer *leaf;
43 struct extent_map_tree *free_space_cache;
44 int slot;
45 u64 last = 0;
46 u64 hole_size;
47 u64 first_free;
48 int found = 0;
49
50 if (!block_group)
51 return 0;
52
53 root = root->fs_info->extent_root;
54 free_space_cache = &root->fs_info->free_space_cache;
55
56 if (block_group->cached)
57 return 0;
58
59 path = btrfs_alloc_path();
60 if (!path)
61 return -ENOMEM;
62
63 path->reada = 2;
64 first_free = block_group->key.objectid;
65 key.objectid = block_group->key.objectid;
66 key.offset = 0;
67
68 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
69 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
70
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
73
74 if (ret && path->slots[0] > 0)
75 path->slots[0]--;
76
77 while(1) {
78 leaf = path->nodes[0];
79 slot = path->slots[0];
80 if (slot >= btrfs_header_nritems(leaf)) {
81 ret = btrfs_next_leaf(root, path);
82 if (ret < 0)
83 goto err;
84 if (ret == 0) {
85 continue;
86 } else {
87 break;
88 }
89 }
90
91 btrfs_item_key_to_cpu(leaf, &key, slot);
92 if (key.objectid < block_group->key.objectid) {
93 if (btrfs_key_type(&key) != BTRFS_EXTENT_REF_KEY &&
94 key.objectid + key.offset > first_free)
95 first_free = key.objectid + key.offset;
96 goto next;
97 }
98
99 if (key.objectid >= block_group->key.objectid +
100 block_group->key.offset) {
101 break;
102 }
103
104 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
105 if (!found) {
106 last = first_free;
107 found = 1;
108 }
109 if (key.objectid > last) {
110 hole_size = key.objectid - last;
111 set_extent_dirty(free_space_cache, last,
112 last + hole_size - 1,
113 GFP_NOFS);
114 }
115 last = key.objectid + key.offset;
116 }
117next:
118 path->slots[0]++;
119 }
120
121 if (!found)
122 last = first_free;
123 if (block_group->key.objectid +
124 block_group->key.offset > last) {
125 hole_size = block_group->key.objectid +
126 block_group->key.offset - last;
127 set_extent_dirty(free_space_cache, last,
128 last + hole_size - 1, GFP_NOFS);
129 }
130 block_group->cached = 1;
131err:
132 btrfs_free_path(path);
133 return 0;
134}
135
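/*
 * Return the block group cache entry that contains bytenr, or NULL if
 * the address is not covered by a cached block group.  The lookup goes
 * through the block_group_cache extent bits and the private pointer
 * stored on the matching range.
 */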
136struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
137 btrfs_fs_info *info,
138 u64 bytenr)
139{
140 struct extent_map_tree *block_group_cache;
141 struct btrfs_block_group_cache *block_group = NULL;
142 u64 ptr;
143 u64 start;
144 u64 end;
145 int ret;
146
147 block_group_cache = &info->block_group_cache;
148 ret = find_first_extent_bit(block_group_cache,
149 bytenr, &start, &end,
150 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA);
151 if (ret) {
152 return NULL;
153 }
154 ret = get_state_private(block_group_cache, start, &ptr);
155 if (ret)
156 return NULL;
157
158 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
159 if (block_group->key.objectid <= bytenr && bytenr <
160 block_group->key.objectid + block_group->key.offset)
161 return block_group;
162 return NULL;
163}
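
/*
 * Scan the cached free space of *cache_ret for a run of at least num
 * free bytes at or after search_start.  If the current block group has
 * nothing big enough, later groups are tried (wrapping back to
 * search_start once), and *cache_ret is updated to the group that was
 * finally used.
 */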
164static u64 find_search_start(struct btrfs_root *root,
165 struct btrfs_block_group_cache **cache_ret,
166 u64 search_start, int num,
167 int data, int full_scan)
168{
169 int ret;
170 struct btrfs_block_group_cache *cache = *cache_ret;
171 u64 last;
172 u64 start = 0;
173 u64 end = 0;
174 u64 cache_miss = 0;
175 int wrapped = 0;
176
177 if (!cache) {
178 goto out;
179 }
180again:
181 ret = cache_block_group(root, cache);
182 if (ret)
183 goto out;
184
185 last = max(search_start, cache->key.objectid);
186
187 while(1) {
188 ret = find_first_extent_bit(&root->fs_info->free_space_cache,
189 last, &start, &end, EXTENT_DIRTY);
190 if (ret) {
191 if (!cache_miss)
192 cache_miss = last;
193 goto new_group;
194 }
195
196 start = max(last, start);
197 last = end + 1;
198 if (last - start < num) {
199 if (last == cache->key.objectid + cache->key.offset)
200 cache_miss = start;
201 continue;
202 }
203 if (data != BTRFS_BLOCK_GROUP_MIXED &&
204 start + num > cache->key.objectid + cache->key.offset)
205 goto new_group;
206 return start;
207 }
208out:
209 cache = btrfs_lookup_block_group(root->fs_info, search_start);
210 if (!cache) {
211 printk("Unable to find block group for %Lu\n",
212 search_start);
213 WARN_ON(1);
214 return search_start;
215 }
216 return search_start;
217
218new_group:
219 last = cache->key.objectid + cache->key.offset;
220wrapped:
221 cache = btrfs_lookup_block_group(root->fs_info, last);
222 if (!cache) {
223no_cache:
224 if (!wrapped) {
225 wrapped = 1;
226 last = search_start;
227 data = BTRFS_BLOCK_GROUP_MIXED;
228 goto wrapped;
229 }
230 goto out;
231 }
232 if (cache_miss && !cache->cached) {
233 cache_block_group(root, cache);
234 last = cache_miss;
235 cache = btrfs_lookup_block_group(root->fs_info, last);
236 }
237 cache = btrfs_find_block_group(root, cache, last, data, 0);
238 if (!cache)
239 goto no_cache;
240 *cache_ret = cache;
241 cache_miss = 0;
242 goto again;
243}
244
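/* scale num by factor/10, used by the block group fullness checks below */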
245static u64 div_factor(u64 num, int factor)
246{
247 if (factor == 10)
248 return num;
249 num *= factor;
250 do_div(num, 10);
251 return num;
252}
253
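/*
 * Pick a block group to allocate from.  The search_start hint and the
 * caller supplied hint group are tried first if they match the requested
 * type and are less than factor/10 full; otherwise every group of the
 * requested type is scanned, falling back to a full scan and finally to
 * groups of either type before giving up.
 */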
254struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
255 struct btrfs_block_group_cache
256 *hint, u64 search_start,
257 int data, int owner)
258{
259 struct btrfs_block_group_cache *cache;
260 struct extent_map_tree *block_group_cache;
261 struct btrfs_block_group_cache *found_group = NULL;
262 struct btrfs_fs_info *info = root->fs_info;
263 u64 used;
264 u64 last = 0;
265 u64 hint_last;
266 u64 start;
267 u64 end;
268 u64 free_check;
269 u64 ptr;
270 int bit;
271 int ret;
272 int full_search = 0;
273 int factor = 8;
274 int data_swap = 0;
275
276 block_group_cache = &info->block_group_cache;
277
278 if (!owner)
279 factor = 8;
280
281 if (data == BTRFS_BLOCK_GROUP_MIXED) {
282 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
283 factor = 10;
284 } else if (data)
285 bit = BLOCK_GROUP_DATA;
286 else
287 bit = BLOCK_GROUP_METADATA;
288
289 if (search_start) {
290 struct btrfs_block_group_cache *shint;
291 shint = btrfs_lookup_block_group(info, search_start);
292 if (shint && (shint->data == data ||
293 shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
294 used = btrfs_block_group_used(&shint->item);
295 if (used + shint->pinned <
296 div_factor(shint->key.offset, factor)) {
297 return shint;
298 }
299 }
300 }
301 if (hint && (hint->data == data ||
302 hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
303 used = btrfs_block_group_used(&hint->item);
304 if (used + hint->pinned <
305 div_factor(hint->key.offset, factor)) {
306 return hint;
307 }
308 last = hint->key.objectid + hint->key.offset;
309 hint_last = last;
310 } else {
311 if (hint)
312 hint_last = max(hint->key.objectid, search_start);
313 else
314 hint_last = search_start;
315
316 last = hint_last;
317 }
318again:
319 while(1) {
320 ret = find_first_extent_bit(block_group_cache, last,
321 &start, &end, bit);
322 if (ret)
323 break;
324
325 ret = get_state_private(block_group_cache, start, &ptr);
326 if (ret)
327 break;
328
329 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
330 last = cache->key.objectid + cache->key.offset;
331 used = btrfs_block_group_used(&cache->item);
332
333 if (full_search)
334 free_check = cache->key.offset;
335 else
336 free_check = div_factor(cache->key.offset, factor);
337 if (used + cache->pinned < free_check) {
338 found_group = cache;
339 goto found;
340 }
341 cond_resched();
342 }
343 if (!full_search) {
344 last = search_start;
345 full_search = 1;
346 goto again;
347 }
348 if (!data_swap) {
349 data_swap = 1;
350 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
351 last = search_start;
352 goto again;
353 }
354found:
355 return found_group;
356}
357
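/*
 * Hash the back reference fields into a 64 bit value that becomes the
 * offset of the EXTENT_REF key.  The owner and owner_offset fields are
 * left out for now (the #if 0 block below), so refs from the same root
 * and generation land in the same hash bucket.
 */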
358static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
359 u64 owner, u64 owner_offset)
360{
361 u32 high_crc = ~(u32)0;
362 u32 low_crc = ~(u32)0;
363 __le64 lenum;
364
365 lenum = cpu_to_le64(root_objectid);
366 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
367 lenum = cpu_to_le64(ref_generation);
368 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
369
370#if 0
371 lenum = cpu_to_le64(owner);
372 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
373 lenum = cpu_to_le64(owner_offset);
374 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
375#endif
376 return ((u64)high_crc << 32) | (u64)low_crc;
377}
378
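/*
 * Compare an in-memory ref against one stored in a leaf.  When the
 * in-memory ref has no objectid, only the root and generation fields
 * are compared.
 */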
379static int match_extent_ref(struct extent_buffer *leaf,
380 struct btrfs_extent_ref *disk_ref,
381 struct btrfs_extent_ref *cpu_ref)
382{
383 int ret;
384 int len;
385
386 if (cpu_ref->objectid)
387 len = sizeof(*cpu_ref);
388 else
389 len = 2 * sizeof(u64);
390 ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
391 len);
392 return ret == 0;
393}
394
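/*
 * Find the EXTENT_REF item for a given back reference.  The search
 * starts at the hashed key offset and walks forward through any hash
 * collisions until a matching ref is found or the refs for this bytenr
 * run out.  With del set, the path is left ready for btrfs_del_item.
 */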
395static int lookup_extent_backref(struct btrfs_trans_handle *trans,
396 struct btrfs_root *root,
397 struct btrfs_path *path, u64 bytenr,
398 u64 root_objectid, u64 ref_generation,
399 u64 owner, u64 owner_offset, int del)
400{
401 u64 hash;
402 struct btrfs_key key;
403 struct btrfs_key found_key;
404 struct btrfs_extent_ref ref;
405 struct extent_buffer *leaf;
406 struct btrfs_extent_ref *disk_ref;
407 int ret;
408 int ret2;
409
410 btrfs_set_stack_ref_root(&ref, root_objectid);
411 btrfs_set_stack_ref_generation(&ref, ref_generation);
412 btrfs_set_stack_ref_objectid(&ref, owner);
413 btrfs_set_stack_ref_offset(&ref, owner_offset);
414
415 hash = hash_extent_ref(root_objectid, ref_generation, owner,
416 owner_offset);
417 key.offset = hash;
418 key.objectid = bytenr;
419 key.type = BTRFS_EXTENT_REF_KEY;
420
421 while (1) {
422 ret = btrfs_search_slot(trans, root, &key, path,
423 del ? -1 : 0, del);
424 if (ret < 0)
425 goto out;
426 leaf = path->nodes[0];
427 if (ret != 0) {
428 u32 nritems = btrfs_header_nritems(leaf);
429 if (path->slots[0] >= nritems) {
430 ret2 = btrfs_next_leaf(root, path);
431 if (ret2)
432 goto out;
433 leaf = path->nodes[0];
434 }
435 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
436 if (found_key.objectid != bytenr ||
437 found_key.type != BTRFS_EXTENT_REF_KEY)
438 goto out;
439 key.offset = found_key.offset;
440 if (del) {
441 btrfs_release_path(root, path);
442 continue;
443 }
444 }
445 disk_ref = btrfs_item_ptr(path->nodes[0],
446 path->slots[0],
447 struct btrfs_extent_ref);
448 if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
449 ret = 0;
450 goto out;
451 }
452 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
453 key.offset = found_key.offset + 1;
454 btrfs_release_path(root, path);
455 }
456out:
457 return ret;
458}
459
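/*
 * Insert a back reference item keyed on (bytenr, EXTENT_REF, hash).
 * On a hash collision the key offset is bumped until an empty slot is
 * found or an identical ref turns out to exist already.
 */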
460int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
461 struct btrfs_root *root,
462 struct btrfs_path *path, u64 bytenr,
463 u64 root_objectid, u64 ref_generation,
464 u64 owner, u64 owner_offset)
465{
466 u64 hash;
467 struct btrfs_key key;
468 struct btrfs_extent_ref ref;
469 struct btrfs_extent_ref *disk_ref;
470 int ret;
471
472 btrfs_set_stack_ref_root(&ref, root_objectid);
473 btrfs_set_stack_ref_generation(&ref, ref_generation);
474 btrfs_set_stack_ref_objectid(&ref, owner);
475 btrfs_set_stack_ref_offset(&ref, owner_offset);
476
477 hash = hash_extent_ref(root_objectid, ref_generation, owner,
478 owner_offset);
479 key.offset = hash;
480 key.objectid = bytenr;
481 key.type = BTRFS_EXTENT_REF_KEY;
482
483 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
484 while (ret == -EEXIST) {
485 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
486 struct btrfs_extent_ref);
487 if (match_extent_ref(path->nodes[0], disk_ref, &ref))
488 goto out;
489 key.offset++;
490 btrfs_release_path(root, path);
491 ret = btrfs_insert_empty_item(trans, root, path, &key,
492 sizeof(ref));
493 }
494 if (ret)
495 goto out;
496 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
497 struct btrfs_extent_ref);
498 write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
499 sizeof(ref));
500 btrfs_mark_buffer_dirty(path->nodes[0]);
501out:
502 btrfs_release_path(root, path);
503 return ret;
504}
505
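/*
 * Bump the reference count in the extent item covering
 * [bytenr, bytenr + num_bytes) and record a back reference describing
 * the new holder of the extent.
 */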
506int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
507 struct btrfs_root *root,
508 u64 bytenr, u64 num_bytes,
509 u64 root_objectid, u64 ref_generation,
510 u64 owner, u64 owner_offset)
511{
512 struct btrfs_path *path;
513 int ret;
514 struct btrfs_key key;
515 struct extent_buffer *l;
516 struct btrfs_extent_item *item;
517 u32 refs;
518
519 WARN_ON(num_bytes < root->sectorsize);
520 path = btrfs_alloc_path();
521 if (!path)
522 return -ENOMEM;
523
524 key.objectid = bytenr;
525 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
526 key.offset = num_bytes;
527 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
528 0, 1);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
	BUG_ON(ret != 0);
535 l = path->nodes[0];
536 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
537 refs = btrfs_extent_refs(l, item);
538 btrfs_set_extent_refs(l, item, refs + 1);
539 btrfs_mark_buffer_dirty(path->nodes[0]);
540
541 btrfs_release_path(root->fs_info->extent_root, path);
542
543 ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
544 path, bytenr, root_objectid,
545 ref_generation, owner, owner_offset);
546 BUG_ON(ret);
547 finish_current_insert(trans, root->fs_info->extent_root);
548 del_pending_extents(trans, root->fs_info->extent_root);
549
550 btrfs_free_path(path);
551 return 0;
552}
553
554int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
555 struct btrfs_root *root)
556{
557 finish_current_insert(trans, root->fs_info->extent_root);
558 del_pending_extents(trans, root->fs_info->extent_root);
559 return 0;
560}
561
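/* read the current reference count of an extent item into *refs */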
562static int lookup_extent_ref(struct btrfs_trans_handle *trans,
563 struct btrfs_root *root, u64 bytenr,
564 u64 num_bytes, u32 *refs)
565{
566 struct btrfs_path *path;
567 int ret;
568 struct btrfs_key key;
569 struct extent_buffer *l;
570 struct btrfs_extent_item *item;
571
572 WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
574 key.objectid = bytenr;
575 key.offset = num_bytes;
576 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
577 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
578 0, 0);
579 if (ret < 0)
580 goto out;
581 if (ret != 0) {
582 btrfs_print_leaf(root, path->nodes[0]);
583 printk("failed to find block number %Lu\n", bytenr);
584 BUG();
585 }
586 l = path->nodes[0];
587 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
588 *refs = btrfs_extent_refs(l, item);
589out:
590 btrfs_free_path(path);
	return ret;
592}
593
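/*
 * Add a reference on the root block of a tree on behalf of
 * owner_objectid, tagged with the current transaction id.
 */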
594int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
595 struct btrfs_root *root, u64 owner_objectid)
596{
597 u64 generation;
598 u64 key_objectid;
599 u64 level;
600 u32 nritems;
601 struct btrfs_disk_key disk_key;
602
603 level = btrfs_header_level(root->node);
604 generation = trans->transid;
605 nritems = btrfs_header_nritems(root->node);
606 if (nritems > 0) {
607 if (level == 0)
608 btrfs_item_key(root->node, &disk_key, 0);
609 else
610 btrfs_node_key(root->node, &disk_key, 0);
611 key_objectid = btrfs_disk_key_objectid(&disk_key);
612 } else {
613 key_objectid = 0;
614 }
615 return btrfs_inc_extent_ref(trans, root, root->node->start,
616 root->node->len, owner_objectid,
617 generation, 0, 0);
618}
619
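/*
 * Add one reference to every extent pointed to by a node or leaf, so a
 * block that becomes shared keeps all of its children referenced.
 */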
620int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
621 struct extent_buffer *buf)
622{
623 u64 bytenr;
624 u32 nritems;
625 struct btrfs_key key;
626 struct btrfs_file_extent_item *fi;
627 int i;
628 int level;
629 int ret;
630 int faili;
631
632 if (!root->ref_cows)
633 return 0;
634
635 level = btrfs_header_level(buf);
636 nritems = btrfs_header_nritems(buf);
637 for (i = 0; i < nritems; i++) {
638 if (level == 0) {
639 u64 disk_bytenr;
640 btrfs_item_key_to_cpu(buf, &key, i);
641 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
642 continue;
643 fi = btrfs_item_ptr(buf, i,
644 struct btrfs_file_extent_item);
645 if (btrfs_file_extent_type(buf, fi) ==
646 BTRFS_FILE_EXTENT_INLINE)
647 continue;
648 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
649 if (disk_bytenr == 0)
650 continue;
651 ret = btrfs_inc_extent_ref(trans, root, disk_bytenr,
652 btrfs_file_extent_disk_num_bytes(buf, fi),
653 root->root_key.objectid, trans->transid,
654 key.objectid, key.offset);
655 if (ret) {
656 faili = i;
657 goto fail;
658 }
659 } else {
660 bytenr = btrfs_node_blockptr(buf, i);
661 ret = btrfs_inc_extent_ref(trans, root, bytenr,
662 btrfs_level_size(root, level - 1),
663 root->root_key.objectid,
664 trans->transid, 0, 0);
665 if (ret) {
666 faili = i;
667 goto fail;
668 }
669 }
670 }
671 return 0;
672fail:
673 WARN_ON(1);
674#if 0
675 for (i =0; i < faili; i++) {
676 if (level == 0) {
677 u64 disk_bytenr;
678 btrfs_item_key_to_cpu(buf, &key, i);
679 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
680 continue;
681 fi = btrfs_item_ptr(buf, i,
682 struct btrfs_file_extent_item);
683 if (btrfs_file_extent_type(buf, fi) ==
684 BTRFS_FILE_EXTENT_INLINE)
685 continue;
686 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
687 if (disk_bytenr == 0)
688 continue;
689 err = btrfs_free_extent(trans, root, disk_bytenr,
690 btrfs_file_extent_disk_num_bytes(buf,
691 fi), 0);
692 BUG_ON(err);
693 } else {
694 bytenr = btrfs_node_blockptr(buf, i);
695 err = btrfs_free_extent(trans, root, bytenr,
696 btrfs_level_size(root, level - 1), 0);
697 BUG_ON(err);
698 }
699 }
700#endif
701 return ret;
702}
703
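/*
 * Write the in-memory block group item back to its slot in the
 * extent tree.
 */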
704static int write_one_cache_group(struct btrfs_trans_handle *trans,
705 struct btrfs_root *root,
706 struct btrfs_path *path,
707 struct btrfs_block_group_cache *cache)
708{
709 int ret;
710 int pending_ret;
711 struct btrfs_root *extent_root = root->fs_info->extent_root;
712 unsigned long bi;
713 struct extent_buffer *leaf;
714
715 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
716 if (ret < 0)
717 goto fail;
718 BUG_ON(ret);
719
720 leaf = path->nodes[0];
721 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
722 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
723 btrfs_mark_buffer_dirty(leaf);
724 btrfs_release_path(extent_root, path);
725fail:
726 finish_current_insert(trans, extent_root);
727 pending_ret = del_pending_extents(trans, extent_root);
728 if (ret)
729 return ret;
730 if (pending_ret)
731 return pending_ret;
732 return 0;
733
734}
735
736int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
737 struct btrfs_root *root)
738{
739 struct extent_map_tree *block_group_cache;
740 struct btrfs_block_group_cache *cache;
741 int ret;
742 int err = 0;
743 int werr = 0;
744 struct btrfs_path *path;
745 u64 last = 0;
746 u64 start;
747 u64 end;
748 u64 ptr;
749
750 block_group_cache = &root->fs_info->block_group_cache;
751 path = btrfs_alloc_path();
752 if (!path)
753 return -ENOMEM;
754
755 while(1) {
756 ret = find_first_extent_bit(block_group_cache, last,
757 &start, &end, BLOCK_GROUP_DIRTY);
758 if (ret)
759 break;
760
761 last = end + 1;
762 ret = get_state_private(block_group_cache, start, &ptr);
763 if (ret)
764 break;
765
766 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
767 err = write_one_cache_group(trans, root,
768 path, cache);
769 /*
770 * if we fail to write the cache group, we want
771 * to keep it marked dirty in hopes that a later
772 * write will work
773 */
774 if (err) {
775 werr = err;
776 continue;
777 }
778 clear_extent_bits(block_group_cache, start, end,
779 BLOCK_GROUP_DIRTY, GFP_NOFS);
780 }
781 btrfs_free_path(path);
782 return werr;
783}
784
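/*
 * Adjust the used byte counts of the block groups that cover
 * [bytenr, bytenr + num_bytes).  On allocation a group may be retagged
 * as data, metadata or mixed to match what is being stored in it; on
 * free the space can optionally be returned to the free_space_cache.
 * Touched groups are marked BLOCK_GROUP_DIRTY so
 * btrfs_write_dirty_block_groups will write them back.
 */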
785static int update_block_group(struct btrfs_trans_handle *trans,
786 struct btrfs_root *root,
787 u64 bytenr, u64 num_bytes, int alloc,
788 int mark_free, int data)
789{
790 struct btrfs_block_group_cache *cache;
791 struct btrfs_fs_info *info = root->fs_info;
792 u64 total = num_bytes;
793 u64 old_val;
794 u64 byte_in_group;
795 u64 start;
796 u64 end;
797
798 while(total) {
799 cache = btrfs_lookup_block_group(info, bytenr);
800 if (!cache) {
801 return -1;
802 }
803 byte_in_group = bytenr - cache->key.objectid;
804 WARN_ON(byte_in_group > cache->key.offset);
805 start = cache->key.objectid;
806 end = start + cache->key.offset - 1;
807 set_extent_bits(&info->block_group_cache, start, end,
808 BLOCK_GROUP_DIRTY, GFP_NOFS);
809
810 old_val = btrfs_block_group_used(&cache->item);
811 num_bytes = min(total, cache->key.offset - byte_in_group);
812 if (alloc) {
813 if (cache->data != data &&
814 old_val < (cache->key.offset >> 1)) {
815 int bit_to_clear;
816 int bit_to_set;
817 cache->data = data;
818 if (data) {
819 bit_to_clear = BLOCK_GROUP_METADATA;
820 bit_to_set = BLOCK_GROUP_DATA;
821 cache->item.flags &=
822 ~BTRFS_BLOCK_GROUP_MIXED;
823 cache->item.flags |=
824 BTRFS_BLOCK_GROUP_DATA;
825 } else {
826 bit_to_clear = BLOCK_GROUP_DATA;
827 bit_to_set = BLOCK_GROUP_METADATA;
828 cache->item.flags &=
829 ~BTRFS_BLOCK_GROUP_MIXED;
830 cache->item.flags &=
831 ~BTRFS_BLOCK_GROUP_DATA;
832 }
833 clear_extent_bits(&info->block_group_cache,
834 start, end, bit_to_clear,
835 GFP_NOFS);
836 set_extent_bits(&info->block_group_cache,
837 start, end, bit_to_set,
838 GFP_NOFS);
839 } else if (cache->data != data &&
840 cache->data != BTRFS_BLOCK_GROUP_MIXED) {
841 cache->data = BTRFS_BLOCK_GROUP_MIXED;
842 set_extent_bits(&info->block_group_cache,
843 start, end,
844 BLOCK_GROUP_DATA |
845 BLOCK_GROUP_METADATA,
846 GFP_NOFS);
847 }
848 old_val += num_bytes;
849 } else {
850 old_val -= num_bytes;
851 if (mark_free) {
852 set_extent_dirty(&info->free_space_cache,
853 bytenr, bytenr + num_bytes - 1,
854 GFP_NOFS);
855 }
856 }
857 btrfs_set_block_group_used(&cache->item, old_val);
858 total -= num_bytes;
859 bytenr += num_bytes;
860 }
861 return 0;
862}
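
/*
 * Set or clear the pinned bits for a byte range and keep the per block
 * group and fs-wide pinned counters in sync.
 */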
863static int update_pinned_extents(struct btrfs_root *root,
864 u64 bytenr, u64 num, int pin)
865{
866 u64 len;
867 struct btrfs_block_group_cache *cache;
868 struct btrfs_fs_info *fs_info = root->fs_info;
869
870 if (pin) {
871 set_extent_dirty(&fs_info->pinned_extents,
872 bytenr, bytenr + num - 1, GFP_NOFS);
873 } else {
874 clear_extent_dirty(&fs_info->pinned_extents,
875 bytenr, bytenr + num - 1, GFP_NOFS);
876 }
877 while (num > 0) {
878 cache = btrfs_lookup_block_group(fs_info, bytenr);
879 WARN_ON(!cache);
880 len = min(num, cache->key.offset -
881 (bytenr - cache->key.objectid));
882 if (pin) {
883 cache->pinned += len;
884 fs_info->total_pinned += len;
885 } else {
886 cache->pinned -= len;
887 fs_info->total_pinned -= len;
888 }
889 bytenr += len;
890 num -= len;
891 }
892 return 0;
893}
894
895int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
896{
897 u64 last = 0;
898 u64 start;
899 u64 end;
900 struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
901 int ret;
902
903 while(1) {
904 ret = find_first_extent_bit(pinned_extents, last,
905 &start, &end, EXTENT_DIRTY);
906 if (ret)
907 break;
908 set_extent_dirty(copy, start, end, GFP_NOFS);
909 last = end + 1;
910 }
911 return 0;
912}
913
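/*
 * Unpin the ranges recorded in 'unpin' and return them to the free
 * space cache.
 */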
914int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
915 struct btrfs_root *root,
916 struct extent_map_tree *unpin)
917{
918 u64 start;
919 u64 end;
920 int ret;
921 struct extent_map_tree *free_space_cache;
922 free_space_cache = &root->fs_info->free_space_cache;
923
924 while(1) {
925 ret = find_first_extent_bit(unpin, 0, &start, &end,
926 EXTENT_DIRTY);
927 if (ret)
928 break;
929 update_pinned_extents(root, start, end + 1 - start, 0);
930 clear_extent_dirty(unpin, start, end, GFP_NOFS);
931 set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
932 }
933 return 0;
934}
935
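/*
 * Insert extent items (with a single ref owned by the extent root) for
 * every range recorded in the extent_ins tree.  These are allocations
 * that were made while the extent tree itself was being modified and
 * had to be deferred.
 */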
936static int finish_current_insert(struct btrfs_trans_handle *trans, struct
937 btrfs_root *extent_root)
938{
939 u64 start;
940 u64 end;
941 struct btrfs_fs_info *info = extent_root->fs_info;
942 struct btrfs_path *path;
943 struct btrfs_key ins;
944 struct btrfs_extent_item extent_item;
945 int ret;
946 int err = 0;
947
948 btrfs_set_stack_extent_refs(&extent_item, 1);
949 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
951
952 while(1) {
953 ret = find_first_extent_bit(&info->extent_ins, 0, &start,
954 &end, EXTENT_LOCKED);
955 if (ret)
956 break;
957
958 ins.objectid = start;
959 ins.offset = end + 1 - start;
960 err = btrfs_insert_item(trans, extent_root, &ins,
961 &extent_item, sizeof(extent_item));
962 clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
963 GFP_NOFS);
964 err = btrfs_insert_extent_backref(trans, extent_root, path,
965 start, extent_root->root_key.objectid,
966 0, 0, 0);
967 BUG_ON(err);
968 }
969 btrfs_free_path(path);
970 return 0;
971}
972
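/*
 * Keep a byte range from being reused before the transaction commits.
 * Tree blocks that were allocated inside the running transaction can be
 * reused right away (return 1).  With pending set, the range is only
 * queued in pending_del for later processing.
 */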
973static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
974 int pending)
975{
976 int err = 0;
977 struct extent_buffer *buf;
978
979 if (!pending) {
980 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
981 if (buf) {
982 if (btrfs_buffer_uptodate(buf)) {
983 u64 transid =
984 root->fs_info->running_transaction->transid;
985 if (btrfs_header_generation(buf) == transid) {
986 free_extent_buffer(buf);
987 return 1;
988 }
989 }
990 free_extent_buffer(buf);
991 }
992 update_pinned_extents(root, bytenr, num_bytes, 1);
993 } else {
994 set_extent_bits(&root->fs_info->pending_del,
995 bytenr, bytenr + num_bytes - 1,
996 EXTENT_LOCKED, GFP_NOFS);
997 }
998 BUG_ON(err < 0);
999 return 0;
1000}
1001
1002/*
1003 * remove an extent from the root, returns 0 on success
1004 */
1005static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1006 *root, u64 bytenr, u64 num_bytes,
1007 u64 root_objectid, u64 ref_generation,
1008 u64 owner_objectid, u64 owner_offset, int pin,
1009 int mark_free)
1010{
1011 struct btrfs_path *path;
1012 struct btrfs_key key;
1013 struct btrfs_fs_info *info = root->fs_info;
1014 struct btrfs_root *extent_root = info->extent_root;
1015 struct extent_buffer *leaf;
1016 int ret;
1017 struct btrfs_extent_item *ei;
1018 u32 refs;
1019
1020 key.objectid = bytenr;
1021 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1022 key.offset = num_bytes;
1023
1024 path = btrfs_alloc_path();
1025 if (!path)
1026 return -ENOMEM;
1027
1028 if (ref_generation && owner_objectid == 0 && root_objectid == 3) {
1029//printk("drop backref root %Lu gen %Lu byte %Lu\n", root_objectid, ref_generation, bytenr );
1030 }
1031 ret = lookup_extent_backref(trans, extent_root, path,
1032 bytenr, root_objectid,
1033 ref_generation,
1034 owner_objectid, owner_offset, 1);
1035 if (ret == 0) {
1036 ret = btrfs_del_item(trans, extent_root, path);
1037 } else {
1038 btrfs_print_leaf(extent_root, path->nodes[0]);
1039 WARN_ON(1);
1040 printk("Unable to find ref byte nr %Lu root %Lu "
1041 " gen %Lu owner %Lu offset %Lu\n", bytenr,
1042 root_objectid, ref_generation, owner_objectid,
1043 owner_offset);
1044 }
1045 btrfs_release_path(extent_root, path);
1046 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}
1049 BUG_ON(ret);
1050
1051 leaf = path->nodes[0];
1052 ei = btrfs_item_ptr(leaf, path->slots[0],
1053 struct btrfs_extent_item);
1054 refs = btrfs_extent_refs(leaf, ei);
1055 BUG_ON(refs == 0);
1056 refs -= 1;
1057 btrfs_set_extent_refs(leaf, ei, refs);
1058 btrfs_mark_buffer_dirty(leaf);
1059
1060 if (refs == 0) {
1061 u64 super_used;
1062 u64 root_used;
1063
1064 if (pin) {
1065 ret = pin_down_bytes(root, bytenr, num_bytes, 0);
1066 if (ret > 0)
1067 mark_free = 1;
1068 BUG_ON(ret < 0);
1069 }
1070
1071 /* block accounting for super block */
1072 super_used = btrfs_super_bytes_used(&info->super_copy);
1073 btrfs_set_super_bytes_used(&info->super_copy,
1074 super_used - num_bytes);
1075
1076 /* block accounting for root item */
1077 root_used = btrfs_root_used(&root->root_item);
1078 btrfs_set_root_used(&root->root_item,
1079 root_used - num_bytes);
1080
1081 ret = btrfs_del_item(trans, extent_root, path);
		if (ret) {
			btrfs_free_path(path);
			return ret;
		}
1085 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1086 mark_free, 0);
1087 BUG_ON(ret);
1088 }
1089 btrfs_free_path(path);
1090 finish_current_insert(trans, extent_root);
1091 return ret;
1092}
1093
1094/*
 * find all the extent ranges queued for pending deletion, pin them,
 * and remove their extent items from the extent tree
1097 */
1098static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1099 btrfs_root *extent_root)
1100{
1101 int ret;
1102 int err = 0;
1103 u64 start;
1104 u64 end;
1105 struct extent_map_tree *pending_del;
1106 struct extent_map_tree *pinned_extents;
1107
1108 pending_del = &extent_root->fs_info->pending_del;
1109 pinned_extents = &extent_root->fs_info->pinned_extents;
1110
1111 while(1) {
1112 ret = find_first_extent_bit(pending_del, 0, &start, &end,
1113 EXTENT_LOCKED);
1114 if (ret)
1115 break;
1116 update_pinned_extents(extent_root, start, end + 1 - start, 1);
1117 clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
1118 GFP_NOFS);
1119 ret = __free_extent(trans, extent_root,
1120 start, end + 1 - start,
1121 extent_root->root_key.objectid,
1122 0, 0, 0, 0, 0);
1123 if (ret)
1124 err = ret;
1125 }
1126 return err;
1127}
1128
1129/*
1130 * remove an extent from the root, returns 0 on success
1131 */
1132int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1133 *root, u64 bytenr, u64 num_bytes,
1134 u64 root_objectid, u64 ref_generation,
1135 u64 owner_objectid, u64 owner_offset, int pin)
1136{
1137 struct btrfs_root *extent_root = root->fs_info->extent_root;
1138 int pending_ret;
1139 int ret;
1140
1141 WARN_ON(num_bytes < root->sectorsize);
1142 if (!root->ref_cows)
1143 ref_generation = 0;
1144
1145 if (root == extent_root) {
1146 pin_down_bytes(root, bytenr, num_bytes, 1);
1147 return 0;
1148 }
1149 ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
1150 ref_generation, owner_objectid, owner_offset,
1151 pin, pin == 0);
1152 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
1153 return ret ? ret : pending_ret;
1154}
1155
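/* round val up to the next multiple of the stripe size */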
1156static u64 stripe_align(struct btrfs_root *root, u64 val)
1157{
1158 u64 mask = ((u64)root->stripesize - 1);
1159 u64 ret = (val + mask) & ~mask;
1160 return ret;
1161}
1162
1163/*
 * walks the btree of allocated extents and finds a hole of a given size.
1165 * The key ins is changed to record the hole:
1166 * ins->objectid == block start
1167 * ins->flags = BTRFS_EXTENT_ITEM_KEY
1168 * ins->offset == number of blocks
1169 * Any available blocks before search_start are skipped.
1170 */
1171static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1172 *orig_root, u64 num_bytes, u64 empty_size,
1173 u64 search_start, u64 search_end, u64 hint_byte,
1174 struct btrfs_key *ins, u64 exclude_start,
1175 u64 exclude_nr, int data)
1176{
1177 struct btrfs_path *path;
1178 struct btrfs_key key;
1179 u64 hole_size = 0;
1180 u64 aligned;
1181 int ret;
1182 int slot = 0;
1183 u64 last_byte = 0;
1184 u64 orig_search_start = search_start;
1185 int start_found;
1186 struct extent_buffer *l;
1187 struct btrfs_root * root = orig_root->fs_info->extent_root;
1188 struct btrfs_fs_info *info = root->fs_info;
1189 u64 total_needed = num_bytes;
1190 int level;
1191 struct btrfs_block_group_cache *block_group;
1192 int full_scan = 0;
1193 int wrapped = 0;
1194 u64 cached_start;
1195
1196 WARN_ON(num_bytes < root->sectorsize);
1197 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1198
1199 level = btrfs_header_level(root->node);
1200
1201 if (num_bytes >= 32 * 1024 * 1024 && hint_byte) {
1202 data = BTRFS_BLOCK_GROUP_MIXED;
1203 }
1204
1205 if (search_end == (u64)-1)
1206 search_end = btrfs_super_total_bytes(&info->super_copy);
1207 if (hint_byte) {
1208 block_group = btrfs_lookup_block_group(info, hint_byte);
1209 if (!block_group)
1210 hint_byte = search_start;
1211 block_group = btrfs_find_block_group(root, block_group,
1212 hint_byte, data, 1);
1213 } else {
1214 block_group = btrfs_find_block_group(root,
1215 trans->block_group,
1216 search_start, data, 1);
1217 }
1218
1219 total_needed += empty_size;
1220 path = btrfs_alloc_path();
1221check_failed:
1222 search_start = find_search_start(root, &block_group, search_start,
1223 total_needed, data, full_scan);
1224 search_start = stripe_align(root, search_start);
1225 cached_start = search_start;
1226 btrfs_init_path(path);
1227 ins->objectid = search_start;
1228 ins->offset = 0;
1229 start_found = 0;
1230 path->reada = 2;
1231
1232 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1233 if (ret < 0)
1234 goto error;
1235
1236 if (path->slots[0] > 0) {
1237 path->slots[0]--;
1238 }
1239
1240 l = path->nodes[0];
1241 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1242
1243 /*
1244 * walk backwards to find the first extent item key
1245 */
1246 while(btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
1247 if (path->slots[0] == 0) {
1248 ret = btrfs_prev_leaf(root, path);
1249 if (ret != 0) {
1250 ret = btrfs_search_slot(trans, root, ins,
1251 path, 0, 0);
1252 if (ret < 0)
1253 goto error;
1254 if (path->slots[0] > 0)
1255 path->slots[0]--;
1256 break;
1257 }
1258 } else {
1259 path->slots[0]--;
1260 }
1261 l = path->nodes[0];
1262 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1263 }
1264 while (1) {
1265 l = path->nodes[0];
1266 slot = path->slots[0];
1267 if (slot >= btrfs_header_nritems(l)) {
1268 ret = btrfs_next_leaf(root, path);
1269 if (ret == 0)
1270 continue;
1271 if (ret < 0)
1272 goto error;
1273
1274 search_start = max(search_start,
1275 block_group->key.objectid);
1276 if (!start_found) {
1277 aligned = stripe_align(root, search_start);
1278 ins->objectid = aligned;
1279 if (aligned >= search_end) {
1280 ret = -ENOSPC;
1281 goto error;
1282 }
1283 ins->offset = search_end - aligned;
1284 start_found = 1;
1285 goto check_pending;
1286 }
1287 ins->objectid = stripe_align(root,
1288 last_byte > search_start ?
1289 last_byte : search_start);
1290 if (search_end <= ins->objectid) {
1291 ret = -ENOSPC;
1292 goto error;
1293 }
1294 ins->offset = search_end - ins->objectid;
1295 BUG_ON(ins->objectid >= search_end);
1296 goto check_pending;
1297 }
1298 btrfs_item_key_to_cpu(l, &key, slot);
1299
1300 if (key.objectid >= search_start && key.objectid > last_byte &&
1301 start_found) {
1302 if (last_byte < search_start)
1303 last_byte = search_start;
1304 aligned = stripe_align(root, last_byte);
1305 hole_size = key.objectid - aligned;
1306 if (key.objectid > aligned && hole_size >= num_bytes) {
1307 ins->objectid = aligned;
1308 ins->offset = hole_size;
1309 goto check_pending;
1310 }
1311 }
1312 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) {
1313 if (!start_found && btrfs_key_type(&key) ==
1314 BTRFS_BLOCK_GROUP_ITEM_KEY) {
1315 last_byte = key.objectid;
1316 start_found = 1;
1317 }
1318 goto next;
1319 }
1320
1321
1322 start_found = 1;
1323 last_byte = key.objectid + key.offset;
1324
1325 if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
1326 last_byte >= block_group->key.objectid +
1327 block_group->key.offset) {
1328 btrfs_release_path(root, path);
1329 search_start = block_group->key.objectid +
1330 block_group->key.offset;
1331 goto new_group;
1332 }
1333next:
1334 path->slots[0]++;
1335 cond_resched();
1336 }
1337check_pending:
1338 /* we have to make sure we didn't find an extent that has already
1339 * been allocated by the map tree or the original allocation
1340 */
1341 btrfs_release_path(root, path);
1342 BUG_ON(ins->objectid < search_start);
1343
1344 if (ins->objectid + num_bytes >= search_end)
1345 goto enospc;
1346 if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
1347 ins->objectid + num_bytes > block_group->
1348 key.objectid + block_group->key.offset) {
1349 search_start = block_group->key.objectid +
1350 block_group->key.offset;
1351 goto new_group;
1352 }
1353 if (test_range_bit(&info->extent_ins, ins->objectid,
1354 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
1355 search_start = ins->objectid + num_bytes;
1356 goto new_group;
1357 }
1358 if (test_range_bit(&info->pinned_extents, ins->objectid,
1359 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
1360 search_start = ins->objectid + num_bytes;
1361 goto new_group;
1362 }
1363 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
1364 ins->objectid < exclude_start + exclude_nr)) {
1365 search_start = exclude_start + exclude_nr;
1366 goto new_group;
1367 }
1368 if (!data) {
1369 block_group = btrfs_lookup_block_group(info, ins->objectid);
1370 if (block_group)
1371 trans->block_group = block_group;
1372 }
1373 ins->offset = num_bytes;
1374 btrfs_free_path(path);
1375 return 0;
1376
1377new_group:
1378 if (search_start + num_bytes >= search_end) {
1379enospc:
1380 search_start = orig_search_start;
1381 if (full_scan) {
1382 ret = -ENOSPC;
1383 goto error;
1384 }
1385 if (wrapped) {
1386 if (!full_scan)
1387 total_needed -= empty_size;
1388 full_scan = 1;
1389 data = BTRFS_BLOCK_GROUP_MIXED;
1390 } else
1391 wrapped = 1;
1392 }
1393 block_group = btrfs_lookup_block_group(info, search_start);
1394 cond_resched();
1395 block_group = btrfs_find_block_group(root, block_group,
1396 search_start, data, 0);
1397 goto check_failed;
1398
1399error:
1400 btrfs_release_path(root, path);
1401 btrfs_free_path(path);
1402 return ret;
1403}
1404/*
 * finds a free extent and does all the dirty work required for allocation.
 * The key for the allocated extent is returned through ins.
1408 *
1409 * returns 0 if everything worked, non-zero otherwise.
1410 */
1411int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1412 struct btrfs_root *root,
1413 u64 num_bytes, u64 root_objectid, u64 ref_generation,
1414 u64 owner, u64 owner_offset,
1415 u64 empty_size, u64 hint_byte,
1416 u64 search_end, struct btrfs_key *ins, int data)
1417{
1418 int ret;
1419 int pending_ret;
1420 u64 super_used, root_used;
1421 u64 search_start = 0;
1422 struct btrfs_fs_info *info = root->fs_info;
1423 struct btrfs_root *extent_root = info->extent_root;
1424 struct btrfs_extent_item extent_item;
1425 struct btrfs_path *path;
1426
1427 btrfs_set_stack_extent_refs(&extent_item, 1);
1428
1429 WARN_ON(num_bytes < root->sectorsize);
1430 ret = find_free_extent(trans, root, num_bytes, empty_size,
1431 search_start, search_end, hint_byte, ins,
1432 trans->alloc_exclude_start,
1433 trans->alloc_exclude_nr, data);
1434 BUG_ON(ret);
1435 if (ret)
1436 return ret;
1437
1438 /* block accounting for super block */
1439 super_used = btrfs_super_bytes_used(&info->super_copy);
1440 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
1441
1442 /* block accounting for root item */
1443 root_used = btrfs_root_used(&root->root_item);
1444 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
1445
1446 clear_extent_dirty(&root->fs_info->free_space_cache,
1447 ins->objectid, ins->objectid + ins->offset - 1,
1448 GFP_NOFS);
1449
1450 if (root == extent_root) {
1451 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
1452 ins->objectid + ins->offset - 1,
1453 EXTENT_LOCKED, GFP_NOFS);
1454 WARN_ON(data == 1);
1455 goto update_block;
1456 }
1457
1458 WARN_ON(trans->alloc_exclude_nr);
1459 trans->alloc_exclude_start = ins->objectid;
1460 trans->alloc_exclude_nr = ins->offset;
1461 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
1462 sizeof(extent_item));
1463
1464 trans->alloc_exclude_start = 0;
1465 trans->alloc_exclude_nr = 0;
1466 BUG_ON(ret);
1467
1468 path = btrfs_alloc_path();
1469 BUG_ON(!path);
1470 ret = btrfs_insert_extent_backref(trans, extent_root, path,
1471 ins->objectid, root_objectid,
1472 ref_generation, owner, owner_offset);
1473
1474 BUG_ON(ret);
1475 btrfs_free_path(path);
1476 finish_current_insert(trans, extent_root);
1477 pending_ret = del_pending_extents(trans, extent_root);
1478
1479 if (ret) {
1480 return ret;
1481 }
1482 if (pending_ret) {
1483 return pending_ret;
1484 }
1485
1486update_block:
1487 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
1488 data);
1489 BUG_ON(ret);
1490 return 0;
1491}
1492
1493/*
1494 * helper function to allocate a block for a given tree
 * returns the tree buffer or an ERR_PTR on failure.
1496 */
1497struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1498 struct btrfs_root *root,
1499 u32 blocksize,
1500 u64 root_objectid, u64 hint,
1501 u64 empty_size)
1502{
1503 u64 ref_generation;
1504
1505 if (root->ref_cows)
1506 ref_generation = trans->transid;
1507 else
1508 ref_generation = 0;
1509
1510
1511 return __btrfs_alloc_free_block(trans, root, blocksize, root_objectid,
1512 ref_generation, 0, 0, hint, empty_size);
1513}
1514
1515/*
1516 * helper function to allocate a block for a given tree
 * returns the tree buffer or an ERR_PTR on failure.
1518 */
1519struct extent_buffer *__btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1520 struct btrfs_root *root,
1521 u32 blocksize,
1522 u64 root_objectid,
1523 u64 ref_generation,
1524 u64 first_objectid,
1525 int level,
1526 u64 hint,
1527 u64 empty_size)
1528{
1529 struct btrfs_key ins;
1530 int ret;
1531 struct extent_buffer *buf;
1532
1533 ret = btrfs_alloc_extent(trans, root, blocksize,
1534 root_objectid, ref_generation,
1535 first_objectid, level, empty_size, hint,
1536 (u64)-1, &ins, 0);
1537 if (ret) {
1538 BUG_ON(ret > 0);
1539 return ERR_PTR(ret);
1540 }
1541 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
1542 if (!buf) {
1543 btrfs_free_extent(trans, root, ins.objectid, blocksize,
1544 root->root_key.objectid, ref_generation,
1545 0, 0, 0);
1546 return ERR_PTR(-ENOMEM);
1547 }
1548 btrfs_set_buffer_uptodate(buf);
1549 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
1550 buf->start + buf->len - 1, GFP_NOFS);
1551 set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
1552 buf->start, buf->start + buf->len - 1,
1553 EXTENT_CSUM, GFP_NOFS);
1554 buf->flags |= EXTENT_CSUM;
1555 btrfs_set_buffer_defrag(buf);
1556 trans->blocks_used++;
1557 return buf;
1558}
1559
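/*
 * Drop one reference on every file extent a leaf points to.  Used by
 * the snapshot dropping code when the leaf itself is freed.
 */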
1560static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1561 struct btrfs_root *root, struct extent_buffer *leaf)
1562{
1563 u64 leaf_owner;
1564 u64 leaf_generation;
1565 struct btrfs_key key;
1566 struct btrfs_file_extent_item *fi;
1567 int i;
1568 int nritems;
1569 int ret;
1570
1571 BUG_ON(!btrfs_is_leaf(leaf));
1572 nritems = btrfs_header_nritems(leaf);
1573 leaf_owner = btrfs_header_owner(leaf);
1574 leaf_generation = btrfs_header_generation(leaf);
1575
1576 for (i = 0; i < nritems; i++) {
1577 u64 disk_bytenr;
1578
1579 btrfs_item_key_to_cpu(leaf, &key, i);
1580 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1581 continue;
1582 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1583 if (btrfs_file_extent_type(leaf, fi) ==
1584 BTRFS_FILE_EXTENT_INLINE)
1585 continue;
1586 /*
1587 * FIXME make sure to insert a trans record that
1588 * repeats the snapshot del on crash
1589 */
1590 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1591 if (disk_bytenr == 0)
1592 continue;
1593 ret = btrfs_free_extent(trans, root, disk_bytenr,
1594 btrfs_file_extent_disk_num_bytes(leaf, fi),
1595 leaf_owner, leaf_generation,
1596 key.objectid, key.offset, 0);
1597 BUG_ON(ret);
1598 }
1599 return 0;
1600}
1601
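/*
 * Issue readahead for the child blocks that walk_down_tree will descend
 * into (those with a reference count of one), dropping fs_mutex around
 * the reads.
 */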
1602static void reada_walk_down(struct btrfs_root *root,
1603 struct extent_buffer *node)
1604{
1605 int i;
1606 u32 nritems;
1607 u64 bytenr;
1608 int ret;
1609 u32 refs;
1610 int level;
1611 u32 blocksize;
1612
1613 nritems = btrfs_header_nritems(node);
1614 level = btrfs_header_level(node);
1615 for (i = 0; i < nritems; i++) {
1616 bytenr = btrfs_node_blockptr(node, i);
1617 blocksize = btrfs_level_size(root, level - 1);
1618 ret = lookup_extent_ref(NULL, root, bytenr, blocksize, &refs);
1619 BUG_ON(ret);
1620 if (refs != 1)
1621 continue;
1622 mutex_unlock(&root->fs_info->fs_mutex);
1623 ret = readahead_tree_block(root, bytenr, blocksize);
1624 cond_resched();
1625 mutex_lock(&root->fs_info->fs_mutex);
1626 if (ret)
1627 break;
1628 }
1629}
1630
1631/*
1632 * helper function for drop_snapshot, this walks down the tree dropping ref
1633 * counts as it goes.
1634 */
1635static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1636 *root, struct btrfs_path *path, int *level)
1637{
1638 u64 root_owner;
1639 u64 root_gen;
1640 u64 bytenr;
1641 struct extent_buffer *next;
1642 struct extent_buffer *cur;
1643 struct extent_buffer *parent;
1644 u32 blocksize;
1645 int ret;
1646 u32 refs;
1647
1648 WARN_ON(*level < 0);
1649 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1650 ret = lookup_extent_ref(trans, root,
1651 path->nodes[*level]->start,
1652 path->nodes[*level]->len, &refs);
1653 BUG_ON(ret);
1654 if (refs > 1)
1655 goto out;
1656
1657 /*
1658 * walk down to the last node level and free all the leaves
1659 */
1660 while(*level >= 0) {
1661 WARN_ON(*level < 0);
1662 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1663 cur = path->nodes[*level];
1664
1665 if (*level > 0 && path->slots[*level] == 0)
1666 reada_walk_down(root, cur);
1667
1668 if (btrfs_header_level(cur) != *level)
1669 WARN_ON(1);
1670
1671 if (path->slots[*level] >=
1672 btrfs_header_nritems(cur))
1673 break;
1674 if (*level == 0) {
1675 ret = drop_leaf_ref(trans, root, cur);
1676 BUG_ON(ret);
1677 break;
1678 }
1679 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
1680 blocksize = btrfs_level_size(root, *level - 1);
1681 ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs);
1682 BUG_ON(ret);
1683 if (refs != 1) {
1684 parent = path->nodes[*level];
1685 root_owner = btrfs_header_owner(parent);
1686 root_gen = btrfs_header_generation(parent);
1687 path->slots[*level]++;
1688 ret = btrfs_free_extent(trans, root, bytenr,
1689 blocksize, root_owner,
1690 root_gen, 0, 0, 1);
1691 BUG_ON(ret);
1692 continue;
1693 }
1694 next = btrfs_find_tree_block(root, bytenr, blocksize);
1695 if (!next || !btrfs_buffer_uptodate(next)) {
1696 free_extent_buffer(next);
1697 mutex_unlock(&root->fs_info->fs_mutex);
1698 next = read_tree_block(root, bytenr, blocksize);
1699 mutex_lock(&root->fs_info->fs_mutex);
1700
1701 /* we dropped the lock, check one more time */
1702 ret = lookup_extent_ref(trans, root, bytenr,
1703 blocksize, &refs);
1704 BUG_ON(ret);
1705 if (refs != 1) {
1706 parent = path->nodes[*level];
1707 root_owner = btrfs_header_owner(parent);
1708 root_gen = btrfs_header_generation(parent);
1709
1710 path->slots[*level]++;
1711 free_extent_buffer(next);
1712 ret = btrfs_free_extent(trans, root, bytenr,
1713 blocksize,
1714 root_owner,
1715 root_gen, 0, 0, 1);
1716 BUG_ON(ret);
1717 continue;
1718 }
1719 }
1720 WARN_ON(*level <= 0);
1721 if (path->nodes[*level-1])
1722 free_extent_buffer(path->nodes[*level-1]);
1723 path->nodes[*level-1] = next;
1724 *level = btrfs_header_level(next);
1725 path->slots[*level] = 0;
1726 }
1727out:
1728 WARN_ON(*level < 0);
1729 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1730
1731 if (path->nodes[*level] == root->node) {
1732 root_owner = root->root_key.objectid;
1733 parent = path->nodes[*level];
1734 } else {
1735 parent = path->nodes[*level + 1];
1736 root_owner = btrfs_header_owner(parent);
1737 }
1738
1739 root_gen = btrfs_header_generation(parent);
1740 ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
1741 path->nodes[*level]->len,
1742 root_owner, root_gen, 0, 0, 1);
1743 free_extent_buffer(path->nodes[*level]);
1744 path->nodes[*level] = NULL;
1745 *level += 1;
1746 BUG_ON(ret);
1747 return 0;
1748}
1749
1750/*
1751 * helper for dropping snapshots. This walks back up the tree in the path
1752 * to find the first node higher up where we haven't yet gone through
1753 * all the slots
1754 */
1755static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1756 *root, struct btrfs_path *path, int *level)
1757{
1758 u64 root_owner;
1759 u64 root_gen;
1760 struct btrfs_root_item *root_item = &root->root_item;
1761 int i;
1762 int slot;
1763 int ret;
1764
1765 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1766 slot = path->slots[i];
1767 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1768 struct extent_buffer *node;
1769 struct btrfs_disk_key disk_key;
1770 node = path->nodes[i];
1771 path->slots[i]++;
1772 *level = i;
1773 WARN_ON(*level == 0);
1774 btrfs_node_key(node, &disk_key, path->slots[i]);
1775 memcpy(&root_item->drop_progress,
1776 &disk_key, sizeof(disk_key));
1777 root_item->drop_level = i;
1778 return 0;
1779 } else {
1780 if (path->nodes[*level] == root->node) {
1781 root_owner = root->root_key.objectid;
1782 root_gen =
1783 btrfs_header_generation(path->nodes[*level]);
1784 } else {
1785 struct extent_buffer *node;
1786 node = path->nodes[*level + 1];
1787 root_owner = btrfs_header_owner(node);
1788 root_gen = btrfs_header_generation(node);
1789 }
1790 ret = btrfs_free_extent(trans, root,
1791 path->nodes[*level]->start,
1792 path->nodes[*level]->len,
1793 root_owner, root_gen, 0, 0, 1);
1794 BUG_ON(ret);
1795 free_extent_buffer(path->nodes[*level]);
1796 path->nodes[*level] = NULL;
1797 *level = i + 1;
1798 }
1799 }
1800 return 1;
1801}
1802
1803/*
1804 * drop the reference count on the tree rooted at 'snap'. This traverses
1805 * the tree freeing any blocks that have a ref count of zero after being
1806 * decremented.
1807 */
1808int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1809 *root)
1810{
1811 int ret = 0;
1812 int wret;
1813 int level;
1814 struct btrfs_path *path;
1815 int i;
1816 int orig_level;
1817 struct btrfs_root_item *root_item = &root->root_item;
1818
1819 path = btrfs_alloc_path();
1820 BUG_ON(!path);
1821
1822 level = btrfs_header_level(root->node);
1823 orig_level = level;
1824 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1825 path->nodes[level] = root->node;
1826 extent_buffer_get(root->node);
1827 path->slots[level] = 0;
1828 } else {
1829 struct btrfs_key key;
1830 struct btrfs_disk_key found_key;
1831 struct extent_buffer *node;
1832
1833 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1834 level = root_item->drop_level;
1835 path->lowest_level = level;
1836 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1837 if (wret < 0) {
1838 ret = wret;
1839 goto out;
1840 }
1841 node = path->nodes[level];
1842 btrfs_node_key(node, &found_key, path->slots[level]);
1843 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
1844 sizeof(found_key)));
1845 }
1846 while(1) {
1847 wret = walk_down_tree(trans, root, path, &level);
1848 if (wret > 0)
1849 break;
1850 if (wret < 0)
1851 ret = wret;
1852
1853 wret = walk_up_tree(trans, root, path, &level);
1854 if (wret > 0)
1855 break;
1856 if (wret < 0)
1857 ret = wret;
1858 ret = -EAGAIN;
1859 break;
1860 }
1861 for (i = 0; i <= orig_level; i++) {
1862 if (path->nodes[i]) {
1863 free_extent_buffer(path->nodes[i]);
1864 path->nodes[i] = NULL;
1865 }
1866 }
1867out:
1868 btrfs_free_path(path);
1869 return ret;
1870}
1871
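/*
 * Free the in-memory block group cache entries and clear the cached
 * free space ranges.
 */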
1872int btrfs_free_block_groups(struct btrfs_fs_info *info)
1873{
1874 u64 start;
1875 u64 end;
1876 u64 ptr;
1877 int ret;
1878 while(1) {
1879 ret = find_first_extent_bit(&info->block_group_cache, 0,
1880 &start, &end, (unsigned int)-1);
1881 if (ret)
1882 break;
1883 ret = get_state_private(&info->block_group_cache, start, &ptr);
1884 if (!ret)
1885 kfree((void *)(unsigned long)ptr);
1886 clear_extent_bits(&info->block_group_cache, start,
1887 end, (unsigned int)-1, GFP_NOFS);
1888 }
1889 while(1) {
1890 ret = find_first_extent_bit(&info->free_space_cache, 0,
1891 &start, &end, EXTENT_DIRTY);
1892 if (ret)
1893 break;
1894 clear_extent_dirty(&info->free_space_cache, start,
1895 end, GFP_NOFS);
1896 }
1897 return 0;
1898}
1899
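/*
 * Load every block group item from the extent tree into the in-memory
 * block_group_cache, tagging each range as data, metadata or mixed.
 */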
1900int btrfs_read_block_groups(struct btrfs_root *root)
1901{
1902 struct btrfs_path *path;
1903 int ret;
1904 int err = 0;
1905 int bit;
1906 struct btrfs_block_group_cache *cache;
1907 struct btrfs_fs_info *info = root->fs_info;
1908 struct extent_map_tree *block_group_cache;
1909 struct btrfs_key key;
1910 struct btrfs_key found_key;
1911 struct extent_buffer *leaf;
1912
1913 block_group_cache = &info->block_group_cache;
1914
1915 root = info->extent_root;
1916 key.objectid = 0;
1917 key.offset = BTRFS_BLOCK_GROUP_SIZE;
1918 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1919
1920 path = btrfs_alloc_path();
1921 if (!path)
1922 return -ENOMEM;
1923
1924 while(1) {
1925 ret = btrfs_search_slot(NULL, info->extent_root,
1926 &key, path, 0, 0);
1927 if (ret != 0) {
1928 err = ret;
1929 break;
1930 }
1931 leaf = path->nodes[0];
1932 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1933 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1934 if (!cache) {
1935 err = -1;
1936 break;
1937 }
1938
1939 read_extent_buffer(leaf, &cache->item,
1940 btrfs_item_ptr_offset(leaf, path->slots[0]),
1941 sizeof(cache->item));
1942 memcpy(&cache->key, &found_key, sizeof(found_key));
1943 cache->cached = 0;
1944 cache->pinned = 0;
1945 key.objectid = found_key.objectid + found_key.offset;
1946 btrfs_release_path(root, path);
1947
1948 if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
1949 bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
1950 cache->data = BTRFS_BLOCK_GROUP_MIXED;
1951 } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
1952 bit = BLOCK_GROUP_DATA;
1953 cache->data = BTRFS_BLOCK_GROUP_DATA;
1954 } else {
1955 bit = BLOCK_GROUP_METADATA;
1956 cache->data = 0;
1957 }
1958
1959 /* use EXTENT_LOCKED to prevent merging */
1960 set_extent_bits(block_group_cache, found_key.objectid,
1961 found_key.objectid + found_key.offset - 1,
1962 bit | EXTENT_LOCKED, GFP_NOFS);
1963 set_state_private(block_group_cache, found_key.objectid,
1964 (unsigned long)cache);
1965
1966 if (key.objectid >=
1967 btrfs_super_total_bytes(&info->super_copy))
1968 break;
1969 }
1970
1971 btrfs_free_path(path);
1972 return 0;
1973}