Btrfs: fix free space leak
[linux-2.6-block.git] / fs / btrfs / extent-tree.c
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/pagemap.h>
20#include <linux/writeback.h>
21#include <linux/blkdev.h>
22#include "hash.h"
23#include "crc32c.h"
24#include "ctree.h"
25#include "disk-io.h"
26#include "print-tree.h"
27#include "transaction.h"
28#include "volumes.h"
29#include "locking.h"
30#include "ref-cache.h"
31
32#define PENDING_EXTENT_INSERT 0
33#define PENDING_EXTENT_DELETE 1
34#define PENDING_BACKREF_UPDATE 2
35
36struct pending_extent_op {
37 int type;
38 u64 bytenr;
39 u64 num_bytes;
40 u64 parent;
41 u64 orig_parent;
42 u64 generation;
43 u64 orig_generation;
44 int level;
45 struct list_head list;
46 int del;
47};
48
49static int finish_current_insert(struct btrfs_trans_handle *trans, struct
50 btrfs_root *extent_root, int all);
51static int del_pending_extents(struct btrfs_trans_handle *trans, struct
52 btrfs_root *extent_root, int all);
53static struct btrfs_block_group_cache *
54__btrfs_find_block_group(struct btrfs_root *root,
55 struct btrfs_block_group_cache *hint,
56 u64 search_start, int data, int owner);
57static int pin_down_bytes(struct btrfs_trans_handle *trans,
58 struct btrfs_root *root,
59 u64 bytenr, u64 num_bytes, int is_data);
60static int update_block_group(struct btrfs_trans_handle *trans,
61 struct btrfs_root *root,
62 u64 bytenr, u64 num_bytes, int alloc,
63 int mark_free);
64
65static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
66{
67 return (cache->flags & bits) == bits;
68}
69
70/*
71 * this adds the block group to the fs_info rb tree for the block group
72 * cache
73 */
74int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
75 struct btrfs_block_group_cache *block_group)
76{
77 struct rb_node **p;
78 struct rb_node *parent = NULL;
79 struct btrfs_block_group_cache *cache;
80
81 spin_lock(&info->block_group_cache_lock);
82 p = &info->block_group_cache_tree.rb_node;
83
84 while (*p) {
85 parent = *p;
86 cache = rb_entry(parent, struct btrfs_block_group_cache,
87 cache_node);
88 if (block_group->key.objectid < cache->key.objectid) {
89 p = &(*p)->rb_left;
90 } else if (block_group->key.objectid > cache->key.objectid) {
91 p = &(*p)->rb_right;
92 } else {
93 spin_unlock(&info->block_group_cache_lock);
94 return -EEXIST;
95 }
96 }
97
98 rb_link_node(&block_group->cache_node, parent, p);
99 rb_insert_color(&block_group->cache_node,
100 &info->block_group_cache_tree);
101 spin_unlock(&info->block_group_cache_lock);
102
103 return 0;
104}
105
106/*
107 * This will return the block group at or after bytenr if contains is 0, else
108 * it will return the block group that contains the bytenr
109 */
110static struct btrfs_block_group_cache *
111block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
112 int contains)
113{
114 struct btrfs_block_group_cache *cache, *ret = NULL;
115 struct rb_node *n;
116 u64 end, start;
117
118 spin_lock(&info->block_group_cache_lock);
119 n = info->block_group_cache_tree.rb_node;
120
121 while (n) {
122 cache = rb_entry(n, struct btrfs_block_group_cache,
123 cache_node);
124 end = cache->key.objectid + cache->key.offset - 1;
125 start = cache->key.objectid;
126
127 if (bytenr < start) {
128 if (!contains && (!ret || start < ret->key.objectid))
129 ret = cache;
130 n = n->rb_left;
131 } else if (bytenr > start) {
132 if (contains && bytenr <= end) {
133 ret = cache;
134 break;
135 }
136 n = n->rb_right;
137 } else {
138 ret = cache;
139 break;
140 }
141 }
142 spin_unlock(&info->block_group_cache_lock);
143
144 return ret;
145}
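
/*
 * Worked example (editor's illustration, hypothetical layout): given two
 * block groups spanning [0, 1G) and [1G, 2G), searching for bytenr 512M
 * with contains == 1 returns the [0, 1G) group, since 512M falls inside
 * it.  With contains == 0 the same bytenr instead returns the [1G, 2G)
 * group: the first group whose start is at or after the search offset.
 */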
146
147/*
148 * this is only called by cache_block_group. Since we could have freed
149 * extents, we need to check the pinned_extents for any extents that can't
150 * be used yet, because their free space is released only when the transaction commits.
151 */
152static int add_new_free_space(struct btrfs_block_group_cache *block_group,
153 struct btrfs_fs_info *info, u64 start, u64 end)
154{
155 u64 extent_start, extent_end, size;
156 int ret;
157
158 mutex_lock(&info->pinned_mutex);
159 while (start < end) {
160 ret = find_first_extent_bit(&info->pinned_extents, start,
161 &extent_start, &extent_end,
162 EXTENT_DIRTY);
163 if (ret)
164 break;
165
166 if (extent_start == start) {
167 start = extent_end + 1;
168 } else if (extent_start > start && extent_start < end) {
169 size = extent_start - start;
170 ret = btrfs_add_free_space_lock(block_group, start,
171 size);
172 BUG_ON(ret);
173 start = extent_end + 1;
174 } else {
175 break;
176 }
177 }
178
179 if (start < end) {
180 size = end - start;
181 ret = btrfs_add_free_space_lock(block_group, start, size);
182 BUG_ON(ret);
183 }
184 mutex_unlock(&info->pinned_mutex);
185
186 return 0;
187}
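
/*
 * Worked example (editor's illustration, hypothetical numbers): called
 * with [0, 100) while a pinned extent covers bytes [30, 40], the loop
 * above adds [0, 30) as free space and advances start to 41; the final
 * check then adds [41, 100).  The pinned bytes themselves only become
 * free space once the transaction commits.
 */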
188
189static int cache_block_group(struct btrfs_root *root,
190 struct btrfs_block_group_cache *block_group)
191{
192 struct btrfs_path *path;
193 int ret = 0;
194 struct btrfs_key key;
195 struct extent_buffer *leaf;
196 int slot;
197 u64 last = 0;
198 u64 first_free;
199 int found = 0;
200
201 if (!block_group)
202 return 0;
203
204 root = root->fs_info->extent_root;
205
206 if (block_group->cached)
207 return 0;
208
209 path = btrfs_alloc_path();
210 if (!path)
211 return -ENOMEM;
212
213 path->reada = 2;
214 /*
215 * we get into deadlocks with paths held by callers of this function.
216 * since the alloc_mutex is protecting things right now, just
217 * skip the locking here
218 */
219 path->skip_locking = 1;
220 first_free = max_t(u64, block_group->key.objectid,
221 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
222 key.objectid = block_group->key.objectid;
223 key.offset = 0;
224 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
225 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
226 if (ret < 0)
227 goto err;
228 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
229 if (ret < 0)
230 goto err;
231 if (ret == 0) {
232 leaf = path->nodes[0];
233 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
234 if (key.objectid + key.offset > first_free)
235 first_free = key.objectid + key.offset;
236 }
237 while (1) {
238 leaf = path->nodes[0];
239 slot = path->slots[0];
240 if (slot >= btrfs_header_nritems(leaf)) {
241 ret = btrfs_next_leaf(root, path);
242 if (ret < 0)
243 goto err;
244 if (ret == 0)
245 continue;
246 else
247 break;
248 }
249 btrfs_item_key_to_cpu(leaf, &key, slot);
250 if (key.objectid < block_group->key.objectid)
251 goto next;
252
253 if (key.objectid >= block_group->key.objectid +
254 block_group->key.offset)
255 break;
256
257 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
258 if (!found) {
259 last = first_free;
260 found = 1;
261 }
262
263 add_new_free_space(block_group, root->fs_info, last,
264 key.objectid);
265
266 last = key.objectid + key.offset;
267 }
268next:
269 path->slots[0]++;
270 }
271
272 if (!found)
273 last = first_free;
274
275 add_new_free_space(block_group, root->fs_info, last,
276 block_group->key.objectid +
277 block_group->key.offset);
278
279 block_group->cached = 1;
280 ret = 0;
281err:
282 btrfs_free_path(path);
283 return ret;
284}
285
286/*
287 * return the block group that starts at or after bytenr
288 */
289struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
290 btrfs_fs_info *info,
291 u64 bytenr)
292{
293 struct btrfs_block_group_cache *cache;
294
295 cache = block_group_cache_tree_search(info, bytenr, 0);
296
297 return cache;
298}
299
300/*
301 * return the block group that contains the given bytenr
302 */
303struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
304 btrfs_fs_info *info,
305 u64 bytenr)
306{
307 struct btrfs_block_group_cache *cache;
308
309 cache = block_group_cache_tree_search(info, bytenr, 1);
310
311 return cache;
312}
313
314static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
315 u64 flags)
316{
317 struct list_head *head = &info->space_info;
318 struct list_head *cur;
319 struct btrfs_space_info *found;
320 list_for_each(cur, head) {
321 found = list_entry(cur, struct btrfs_space_info, list);
322 if (found->flags == flags)
323 return found;
324 }
325 return NULL;
326}
327
328static u64 div_factor(u64 num, int factor)
329{
330 if (factor == 10)
331 return num;
332 num *= factor;
333 do_div(num, 10);
334 return num;
335}
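
/*
 * Worked example (editor's note): div_factor() scales num by factor/10
 * using integer math, so div_factor(1024, 9) == 1024 * 9 / 10 == 921,
 * and factor == 10 returns num unchanged.  Callers below use it to test
 * whether a block group is under ~90% committed without floating point.
 */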
336
337static struct btrfs_block_group_cache *
338__btrfs_find_block_group(struct btrfs_root *root,
339 struct btrfs_block_group_cache *hint,
340 u64 search_start, int data, int owner)
341{
342 struct btrfs_block_group_cache *cache;
343 struct btrfs_block_group_cache *found_group = NULL;
344 struct btrfs_fs_info *info = root->fs_info;
345 u64 used;
346 u64 last = 0;
347 u64 free_check;
348 int full_search = 0;
349 int factor = 10;
350 int wrapped = 0;
351
352 if (data & BTRFS_BLOCK_GROUP_METADATA)
353 factor = 9;
354
355 if (search_start) {
356 struct btrfs_block_group_cache *shint;
357 shint = btrfs_lookup_first_block_group(info, search_start);
358 if (shint && block_group_bits(shint, data)) {
359 spin_lock(&shint->lock);
360 used = btrfs_block_group_used(&shint->item);
361 if (used + shint->pinned + shint->reserved <
362 div_factor(shint->key.offset, factor)) {
363 spin_unlock(&shint->lock);
364 return shint;
365 }
366 spin_unlock(&shint->lock);
367 }
368 }
369 if (hint && block_group_bits(hint, data)) {
370 spin_lock(&hint->lock);
371 used = btrfs_block_group_used(&hint->item);
372 if (used + hint->pinned + hint->reserved <
373 div_factor(hint->key.offset, factor)) {
374 spin_unlock(&hint->lock);
375 return hint;
376 }
377 spin_unlock(&hint->lock);
378 last = hint->key.objectid + hint->key.offset;
379 } else {
380 if (hint)
381 last = max(hint->key.objectid, search_start);
382 else
383 last = search_start;
384 }
385again:
386 while (1) {
387 cache = btrfs_lookup_first_block_group(root->fs_info, last);
388 if (!cache)
389 break;
390
391 spin_lock(&cache->lock);
392 last = cache->key.objectid + cache->key.offset;
393 used = btrfs_block_group_used(&cache->item);
394
395 if (block_group_bits(cache, data)) {
396 free_check = div_factor(cache->key.offset, factor);
397 if (used + cache->pinned + cache->reserved <
398 free_check) {
399 found_group = cache;
400 spin_unlock(&cache->lock);
401 goto found;
402 }
403 }
404 spin_unlock(&cache->lock);
405 cond_resched();
406 }
407 if (!wrapped) {
408 last = search_start;
409 wrapped = 1;
410 goto again;
411 }
412 if (!full_search && factor < 10) {
413 last = search_start;
414 full_search = 1;
415 factor = 10;
416 goto again;
417 }
418found:
419 return found_group;
420}
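
/*
 * Recap of the passes above (editor's note): metadata searches start
 * with factor == 9, so only groups whose used + pinned + reserved is
 * under 90% of their size are accepted.  If the scan from the hint
 * reaches the end without a match it wraps back to search_start, and a
 * final pass relaxes factor to 10 so any group with the right bits can
 * be returned.
 */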
421
422struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
423 struct btrfs_block_group_cache
424 *hint, u64 search_start,
425 int data, int owner)
426{
427
428 struct btrfs_block_group_cache *ret;
429 ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
430 return ret;
431}
432
433/* simple helper to search for an existing extent at a given offset */
434int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
435{
436 int ret;
437 struct btrfs_key key;
438 struct btrfs_path *path;
439
440 path = btrfs_alloc_path();
441 BUG_ON(!path);
442 key.objectid = start;
443 key.offset = len;
444 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
445 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
446 0, 0);
447 btrfs_free_path(path);
448 return ret;
449}
450
451/*
452 * Back reference rules. Back refs have three main goals:
453 *
454 * 1) differentiate between all holders of references to an extent so that
455 * when a reference is dropped we can make sure it was a valid reference
456 * before freeing the extent.
457 *
458 * 2) Provide enough information to quickly find the holders of an extent
459 * if we notice a given block is corrupted or bad.
460 *
461 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
462 * maintenance. This is actually the same as #2, but with a slightly
463 * different use case.
464 *
465 * File extents can be referenced by:
466 *
467 * - multiple snapshots, subvolumes, or different generations in one subvol
468 * - different files inside a single subvolume
469 * - different offsets inside a file (bookend extents in file.c)
470 *
471 * The extent ref structure has fields for:
472 *
473 * - Objectid of the subvolume root
474 * - Generation number of the tree holding the reference
475 * - objectid of the file holding the reference
476 * - number of references held by the parent node (always 1 for tree blocks)
477 *
478 * A btree leaf may hold multiple references to a file extent. In most
479 * cases, these references are from the same file and the corresponding
480 * offsets inside the file are close together.
481 *
482 * When a file extent is allocated the fields are filled in:
483 * (root_key.objectid, trans->transid, inode objectid, 1)
484 *
485 * When a leaf is cow'd, new references are added for every file extent found
486 * in the leaf. It looks similar to the create case, but trans->transid will
487 * be different when the block is cow'd.
488 *
489 * (root_key.objectid, trans->transid, inode objectid,
490 * number of references in the leaf)
491 *
492 * When a file extent is removed either during snapshot deletion or
493 * file truncation, we find the corresponding back reference and check
494 * the following fields:
495 *
496 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
497 * inode objectid)
498 *
499 * Btree extents can be referenced by:
500 *
501 * - Different subvolumes
502 * - Different generations of the same subvolume
503 *
504 * When a tree block is created, back references are inserted:
505 *
506 * (root->root_key.objectid, trans->transid, level, 1)
507 *
508 * When a tree block is cow'd, new back references are added for all the
509 * blocks it points to. If the tree block isn't in a reference counted root,
510 * the old back references are removed. These new back references are of
511 * the form (trans->transid will have increased since creation):
512 *
513 * (root->root_key.objectid, trans->transid, level, 1)
514 *
515 * When a backref is being deleted, the following fields are checked:
516 *
517 * if backref was for a tree root:
518 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
519 * else
520 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
521 *
522 * Back Reference Key composition:
523 *
524 * The key objectid corresponds to the first byte in the extent, the key
525 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
526 * byte of the parent extent. If an extent is a tree root, the key offset
527 * is set to the key objectid.
528 */
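
/*
 * Illustrative keys for the rules above (editor's sketch, hypothetical
 * byte numbers): an extent at byte 8192 referenced by a parent block at
 * byte 4096 carries the backref key (8192, BTRFS_EXTENT_REF_KEY, 4096).
 * If the extent at 8192 is itself a tree root, the offset mirrors the
 * objectid: (8192, BTRFS_EXTENT_REF_KEY, 8192).
 */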
529
530static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
531 struct btrfs_root *root,
532 struct btrfs_path *path,
533 u64 bytenr, u64 parent,
534 u64 ref_root, u64 ref_generation,
535 u64 owner_objectid, int del)
536{
537 struct btrfs_key key;
538 struct btrfs_extent_ref *ref;
539 struct extent_buffer *leaf;
540 u64 ref_objectid;
541 int ret;
542
543 key.objectid = bytenr;
544 key.type = BTRFS_EXTENT_REF_KEY;
545 key.offset = parent;
546
547 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
548 if (ret < 0)
549 goto out;
550 if (ret > 0) {
551 ret = -ENOENT;
552 goto out;
553 }
554
555 leaf = path->nodes[0];
556 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
557 ref_objectid = btrfs_ref_objectid(leaf, ref);
558 if (btrfs_ref_root(leaf, ref) != ref_root ||
559 btrfs_ref_generation(leaf, ref) != ref_generation ||
560 (ref_objectid != owner_objectid &&
561 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
562 ret = -EIO;
563 WARN_ON(1);
564 goto out;
565 }
566 ret = 0;
567out:
568 return ret;
569}
570
571/*
572 * updates all the backrefs that are pending on update_list for the
573 * extent_root
574 */
575static int noinline update_backrefs(struct btrfs_trans_handle *trans,
576 struct btrfs_root *extent_root,
577 struct btrfs_path *path,
578 struct list_head *update_list)
579{
580 struct btrfs_key key;
581 struct btrfs_extent_ref *ref;
582 struct btrfs_fs_info *info = extent_root->fs_info;
583 struct pending_extent_op *op;
584 struct extent_buffer *leaf;
585 int ret = 0;
586 struct list_head *cur = update_list->next;
587 u64 ref_objectid;
588 u64 ref_root = extent_root->root_key.objectid;
589
590 op = list_entry(cur, struct pending_extent_op, list);
591
592search:
593 key.objectid = op->bytenr;
594 key.type = BTRFS_EXTENT_REF_KEY;
595 key.offset = op->orig_parent;
596
597 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
598 BUG_ON(ret);
599
600 leaf = path->nodes[0];
601
602loop:
603 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
604
605 ref_objectid = btrfs_ref_objectid(leaf, ref);
606
607 if (btrfs_ref_root(leaf, ref) != ref_root ||
608 btrfs_ref_generation(leaf, ref) != op->orig_generation ||
609 (ref_objectid != op->level &&
610 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
611 printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, "
612 "owner %u\n", op->bytenr, op->orig_parent,
613 ref_root, op->level);
614 btrfs_print_leaf(extent_root, leaf);
615 BUG();
616 }
617
618 key.objectid = op->bytenr;
619 key.offset = op->parent;
620 key.type = BTRFS_EXTENT_REF_KEY;
621 ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
622 BUG_ON(ret);
623 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
624 btrfs_set_ref_generation(leaf, ref, op->generation);
625
626 cur = cur->next;
627
628 list_del_init(&op->list);
629 unlock_extent(&info->extent_ins, op->bytenr,
630 op->bytenr + op->num_bytes - 1, GFP_NOFS);
631 kfree(op);
632
633 if (cur == update_list) {
634 btrfs_mark_buffer_dirty(path->nodes[0]);
635 btrfs_release_path(extent_root, path);
636 goto out;
637 }
638
639 op = list_entry(cur, struct pending_extent_op, list);
640
641 path->slots[0]++;
642 while (path->slots[0] < btrfs_header_nritems(leaf)) {
643 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
644 if (key.objectid == op->bytenr &&
645 key.type == BTRFS_EXTENT_REF_KEY)
646 goto loop;
647 path->slots[0]++;
648 }
649
650 btrfs_mark_buffer_dirty(path->nodes[0]);
651 btrfs_release_path(extent_root, path);
652 goto search;
653
654out:
655 return 0;
656}
657
658static int noinline insert_extents(struct btrfs_trans_handle *trans,
659 struct btrfs_root *extent_root,
660 struct btrfs_path *path,
661 struct list_head *insert_list, int nr)
662{
663 struct btrfs_key *keys;
664 u32 *data_size;
665 struct pending_extent_op *op;
666 struct extent_buffer *leaf;
667 struct list_head *cur = insert_list->next;
668 struct btrfs_fs_info *info = extent_root->fs_info;
669 u64 ref_root = extent_root->root_key.objectid;
670 int i = 0, last = 0, ret;
671 int total = nr * 2;
672
673 if (!nr)
674 return 0;
675
676 keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
677 if (!keys)
678 return -ENOMEM;
679
680 data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
681 if (!data_size) {
682 kfree(keys);
683 return -ENOMEM;
684 }
685
686 list_for_each_entry(op, insert_list, list) {
687 keys[i].objectid = op->bytenr;
688 keys[i].offset = op->num_bytes;
689 keys[i].type = BTRFS_EXTENT_ITEM_KEY;
690 data_size[i] = sizeof(struct btrfs_extent_item);
691 i++;
692
693 keys[i].objectid = op->bytenr;
694 keys[i].offset = op->parent;
695 keys[i].type = BTRFS_EXTENT_REF_KEY;
696 data_size[i] = sizeof(struct btrfs_extent_ref);
697 i++;
698 }
699
700 op = list_entry(cur, struct pending_extent_op, list);
701 i = 0;
702 while (i < total) {
703 int c;
704 ret = btrfs_insert_some_items(trans, extent_root, path,
705 keys+i, data_size+i, total-i);
706 BUG_ON(ret < 0);
707
708 if (last && ret > 1)
709 BUG();
710
711 leaf = path->nodes[0];
712 for (c = 0; c < ret; c++) {
713 int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;
714
715 /*
716 * if the first item we inserted was a backref, then
717 * the EXTENT_ITEM will be the odd c's, else it will
718 * be the even c's
719 */
720 if ((ref_first && (c % 2)) ||
721 (!ref_first && !(c % 2))) {
722 struct btrfs_extent_item *itm;
723
724 itm = btrfs_item_ptr(leaf, path->slots[0] + c,
725 struct btrfs_extent_item);
726 btrfs_set_extent_refs(path->nodes[0], itm, 1);
727 op->del++;
728 } else {
729 struct btrfs_extent_ref *ref;
730
731 ref = btrfs_item_ptr(leaf, path->slots[0] + c,
732 struct btrfs_extent_ref);
733 btrfs_set_ref_root(leaf, ref, ref_root);
734 btrfs_set_ref_generation(leaf, ref,
735 op->generation);
736 btrfs_set_ref_objectid(leaf, ref, op->level);
737 btrfs_set_ref_num_refs(leaf, ref, 1);
738 op->del++;
739 }
740
741 /*
742 * using del to see when it's ok to free up the
743 * pending_extent_op. In the case where we insert the
744 * last item on the list in order to help do batching
745 * we need to not free the extent op until we actually
746 * insert the extent_item
747 */
748 if (op->del == 2) {
749 unlock_extent(&info->extent_ins, op->bytenr,
750 op->bytenr + op->num_bytes - 1,
751 GFP_NOFS);
752 cur = cur->next;
753 list_del_init(&op->list);
754 kfree(op);
755 if (cur != insert_list)
756 op = list_entry(cur,
757 struct pending_extent_op,
758 list);
759 }
760 }
761 btrfs_mark_buffer_dirty(leaf);
762 btrfs_release_path(extent_root, path);
763
764 /*
765 * Ok, backrefs and items usually go right next to each other,
766 * but if we could only insert 1 item that means that we
767 * inserted on the end of a leaf, and we have no idea what may
768 * be on the next leaf so we just play it safe. In order to
769 * try and help this case we insert the last thing on our
770 * insert list so hopefully it will end up being the last
771 * thing on the leaf and everything else will be before it,
772 * which will let us insert a whole bunch of items at the same
773 * time.
774 */
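	/*
	 * Worked example of the above (editor's illustration): with
	 * nr == 3 ops we build total == 6 keys.  If the first insert
	 * only places 1 item, we park our position in 'last', jump to
	 * key total - 1 so the final backref starts the next leaf, and
	 * on the following pass restore i from 'last' and shrink total
	 * by the one key already written.
	 */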
775 if (ret == 1 && !last && (i + ret < total)) {
776 /*
777 * last: where we will pick up the next time around
778 * i: our current key to insert, will be total - 1
779 * cur: the current op we are screwing with
780 * op: duh
781 */
782 last = i + ret;
783 i = total - 1;
784 cur = insert_list->prev;
785 op = list_entry(cur, struct pending_extent_op, list);
786 } else if (last) {
787 /*
788 * ok we successfully inserted the last item on the
789 * list, lets reset everything
790 *
791 * i: our current key to insert, so where we left off
792 * last time
793 * last: done with this
794 * cur: the op we are messing with
795 * op: duh
796 * total: since we inserted the last key, we need to
797 * decrement total so we don't overflow
798 */
799 i = last;
800 last = 0;
801 cur = insert_list->next;
802 op = list_entry(cur, struct pending_extent_op, list);
803 total--;
804 } else {
805 i += ret;
806 }
807
808 cond_resched();
809 }
810 ret = 0;
811 kfree(keys);
812 kfree(data_size);
813 return ret;
814}
815
816static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
817 struct btrfs_root *root,
818 struct btrfs_path *path,
819 u64 bytenr, u64 parent,
820 u64 ref_root, u64 ref_generation,
821 u64 owner_objectid)
822{
823 struct btrfs_key key;
824 struct extent_buffer *leaf;
825 struct btrfs_extent_ref *ref;
826 u32 num_refs;
827 int ret;
828
829 key.objectid = bytenr;
830 key.type = BTRFS_EXTENT_REF_KEY;
831 key.offset = parent;
832
833 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
834 if (ret == 0) {
835 leaf = path->nodes[0];
836 ref = btrfs_item_ptr(leaf, path->slots[0],
837 struct btrfs_extent_ref);
838 btrfs_set_ref_root(leaf, ref, ref_root);
839 btrfs_set_ref_generation(leaf, ref, ref_generation);
840 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
841 btrfs_set_ref_num_refs(leaf, ref, 1);
842 } else if (ret == -EEXIST) {
843 u64 existing_owner;
844 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
845 leaf = path->nodes[0];
846 ref = btrfs_item_ptr(leaf, path->slots[0],
847 struct btrfs_extent_ref);
848 if (btrfs_ref_root(leaf, ref) != ref_root ||
849 btrfs_ref_generation(leaf, ref) != ref_generation) {
850 ret = -EIO;
851 WARN_ON(1);
852 goto out;
853 }
854
855 num_refs = btrfs_ref_num_refs(leaf, ref);
856 BUG_ON(num_refs == 0);
857 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
858
859 existing_owner = btrfs_ref_objectid(leaf, ref);
860 if (existing_owner != owner_objectid &&
861 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
862 btrfs_set_ref_objectid(leaf, ref,
863 BTRFS_MULTIPLE_OBJECTIDS);
864 }
865 ret = 0;
866 } else {
867 goto out;
868 }
869 btrfs_mark_buffer_dirty(path->nodes[0]);
870out:
871 btrfs_release_path(root, path);
872 return ret;
873}
874
875static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
876 struct btrfs_root *root,
877 struct btrfs_path *path)
878{
879 struct extent_buffer *leaf;
880 struct btrfs_extent_ref *ref;
881 u32 num_refs;
882 int ret = 0;
883
884 leaf = path->nodes[0];
885 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
886 num_refs = btrfs_ref_num_refs(leaf, ref);
887 BUG_ON(num_refs == 0);
888 num_refs -= 1;
889 if (num_refs == 0) {
890 ret = btrfs_del_item(trans, root, path);
891 } else {
892 btrfs_set_ref_num_refs(leaf, ref, num_refs);
893 btrfs_mark_buffer_dirty(leaf);
894 }
895 btrfs_release_path(root, path);
896 return ret;
897}
898
899static int noinline free_extents(struct btrfs_trans_handle *trans,
900 struct btrfs_root *extent_root,
901 struct list_head *del_list)
902{
903 struct btrfs_fs_info *info = extent_root->fs_info;
904 struct btrfs_path *path;
905 struct btrfs_key key, found_key;
906 struct extent_buffer *leaf;
907 struct list_head *cur;
908 struct pending_extent_op *op;
909 struct btrfs_extent_item *ei;
910 int ret, num_to_del, extent_slot = 0, found_extent = 0;
911 u32 refs;
912 u64 bytes_freed = 0;
913
914 path = btrfs_alloc_path();
915 if (!path)
916 return -ENOMEM;
917 path->reada = 1;
918
919search:
920 /* search for the backref for the current ref we want to delete */
921 cur = del_list->next;
922 op = list_entry(cur, struct pending_extent_op, list);
923 ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
924 op->orig_parent,
925 extent_root->root_key.objectid,
926 op->orig_generation, op->level, 1);
927 if (ret) {
928 printk("Unable to find backref byte nr %Lu root %Lu gen %Lu "
929 "owner %u\n", op->bytenr,
930 extent_root->root_key.objectid, op->orig_generation,
931 op->level);
932 btrfs_print_leaf(extent_root, path->nodes[0]);
933 WARN_ON(1);
934 goto out;
935 }
936
937 extent_slot = path->slots[0];
938 num_to_del = 1;
939 found_extent = 0;
940
941 /*
942 * if we aren't the first item on the leaf we can move back one and see
943 * if our ref is right next to our extent item
944 */
945 if (likely(extent_slot)) {
946 extent_slot--;
947 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
948 extent_slot);
949 if (found_key.objectid == op->bytenr &&
950 found_key.type == BTRFS_EXTENT_ITEM_KEY &&
951 found_key.offset == op->num_bytes) {
952 num_to_del++;
953 found_extent = 1;
954 }
955 }
956
957 /*
958 * if we didn't find the extent we need to delete the backref and then
959 * search for the extent item key so we can update its ref count
960 */
961 if (!found_extent) {
962 key.objectid = op->bytenr;
963 key.type = BTRFS_EXTENT_ITEM_KEY;
964 key.offset = op->num_bytes;
965
966 ret = remove_extent_backref(trans, extent_root, path);
967 BUG_ON(ret);
968 btrfs_release_path(extent_root, path);
969 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
970 BUG_ON(ret);
971 extent_slot = path->slots[0];
972 }
973
974 /* this is where we update the ref count for the extent */
975 leaf = path->nodes[0];
976 ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
977 refs = btrfs_extent_refs(leaf, ei);
978 BUG_ON(refs == 0);
979 refs--;
980 btrfs_set_extent_refs(leaf, ei, refs);
981
982 btrfs_mark_buffer_dirty(leaf);
983
984 /*
985 * This extent needs deleting. The reason cur_slot is extent_slot +
986 * num_to_del is because extent_slot points to the slot where the extent
987 * is, and if the backref was not right next to the extent we will be
988 * deleting at least 1 item, and will want to start searching at the
989 * slot directly next to extent_slot. However if we did find the
990 * backref next to the extent item then we will be deleting at least 2
991 * items and will want to start searching directly after the ref slot
992 */
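	/*
	 * Concrete case (editor's illustration): if the extent item sits
	 * at slot 5 with its backref at slot 6, then extent_slot == 5,
	 * num_to_del == 2 and cur_slot == 7, the first slot that could
	 * hold the next extent/backref pair from the del_list.
	 */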
993 if (!refs) {
994 struct list_head *pos, *n, *end;
996 int cur_slot = extent_slot + num_to_del;
996 u64 super_used;
997 u64 root_used;
998
999 path->slots[0] = extent_slot;
1000 bytes_freed = op->num_bytes;
1001
1002 mutex_lock(&info->pinned_mutex);
1003 ret = pin_down_bytes(trans, extent_root, op->bytenr,
1004 op->num_bytes, op->level >=
1005 BTRFS_FIRST_FREE_OBJECTID);
1006 mutex_unlock(&info->pinned_mutex);
1007 BUG_ON(ret < 0);
1008 op->del = ret;
1009
1010 /*
1011 * we need to see if we can delete multiple things at once, so
1012 * start looping through the list of extents we are wanting to
1013 * delete and see if their extent/backrefs are right next to
1014 * each other and the extents only have 1 ref
1015 */
1016 for (pos = cur->next; pos != del_list; pos = pos->next) {
1017 struct pending_extent_op *tmp;
1018
1019 tmp = list_entry(pos, struct pending_extent_op, list);
1020
1021 /* we only want to delete extent+ref at this stage */
1022 if (cur_slot >= btrfs_header_nritems(leaf) - 1)
1023 break;
1024
1025 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
1026 if (found_key.objectid != tmp->bytenr ||
1027 found_key.type != BTRFS_EXTENT_ITEM_KEY ||
1028 found_key.offset != tmp->num_bytes)
1029 break;
1030
1031 /* check to make sure this extent only has one ref */
1032 ei = btrfs_item_ptr(leaf, cur_slot,
1033 struct btrfs_extent_item);
1034 if (btrfs_extent_refs(leaf, ei) != 1)
1035 break;
1036
1037 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
1038 if (found_key.objectid != tmp->bytenr ||
1039 found_key.type != BTRFS_EXTENT_REF_KEY ||
1040 found_key.offset != tmp->orig_parent)
1041 break;
1042
1043 /*
1044 * the ref is right next to the extent, we can set the
1045 * ref count to 0 since we will delete them both now
1046 */
1047 btrfs_set_extent_refs(leaf, ei, 0);
1048
1049 /* pin down the bytes for this extent */
1050 mutex_lock(&info->pinned_mutex);
1051 ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
1052 tmp->num_bytes, tmp->level >=
1053 BTRFS_FIRST_FREE_OBJECTID);
1054 mutex_unlock(&info->pinned_mutex);
1055 BUG_ON(ret < 0);
1056
1057 /*
1058 * use the del field to tell if we need to go ahead and
1059 * free up the extent when we delete the item or not.
1060 */
1061 tmp->del = ret;
1062 bytes_freed += tmp->num_bytes;
1063
1064 num_to_del += 2;
1065 cur_slot += 2;
1066 }
1067 end = pos;
1068
1069 /* update the free space counters */
1070 spin_lock_irq(&info->delalloc_lock);
1071 super_used = btrfs_super_bytes_used(&info->super_copy);
1072 btrfs_set_super_bytes_used(&info->super_copy,
1073 super_used - bytes_freed);
1074 spin_unlock_irq(&info->delalloc_lock);
1075
1076 root_used = btrfs_root_used(&extent_root->root_item);
1077 btrfs_set_root_used(&extent_root->root_item,
1078 root_used - bytes_freed);
1079
1080 /* delete the items */
1081 ret = btrfs_del_items(trans, extent_root, path,
1082 path->slots[0], num_to_del);
1083 BUG_ON(ret);
1084
1085 /*
1086 * loop through the extents we deleted and do the cleanup work
1087 * on them
1088 */
1089 for (pos = cur, n = pos->next; pos != end;
1090 pos = n, n = pos->next) {
1091 struct pending_extent_op *tmp;
1092#ifdef BIO_RW_DISCARD
1093 u64 map_length;
1094 struct btrfs_multi_bio *multi = NULL;
1095#endif
1096 tmp = list_entry(pos, struct pending_extent_op, list);
1097
1098 /*
1099 * remember tmp->del tells us whether or not we pinned
1100 * down the extent
1101 */
1102 ret = update_block_group(trans, extent_root,
1103 tmp->bytenr, tmp->num_bytes, 0,
1104 tmp->del);
1105 BUG_ON(ret);
1106
1107#ifdef BIO_RW_DISCARD
1108 ret = btrfs_map_block(&info->mapping_tree, READ,
1109 tmp->bytenr, &map_length, &multi,
1110 0);
1111 if (!ret) {
1112 struct btrfs_bio_stripe *stripe;
1113 int i;
1114
1115 stripe = multi->stripes;
1116
1117 if (map_length > tmp->num_bytes)
1118 map_length = tmp->num_bytes;
1119
1120 for (i = 0; i < multi->num_stripes;
1121 i++, stripe++)
1122 blkdev_issue_discard(stripe->dev->bdev,
1123 stripe->physical >> 9,
1124 map_length >> 9);
1125 kfree(multi);
1126 }
1127#endif
1128 list_del_init(&tmp->list);
1129 unlock_extent(&info->extent_ins, tmp->bytenr,
1130 tmp->bytenr + tmp->num_bytes - 1,
1131 GFP_NOFS);
1132 kfree(tmp);
1133 }
1134 } else if (refs && found_extent) {
1135 /*
1136 * the ref and extent were right next to each other, but the
1137 * extent still has a ref, so just free the backref and keep
1138 * going
1139 */
1140 ret = remove_extent_backref(trans, extent_root, path);
1141 BUG_ON(ret);
1142
1143 list_del_init(&op->list);
1144 unlock_extent(&info->extent_ins, op->bytenr,
1145 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1146 kfree(op);
1147 } else {
1148 /*
1149 * the extent has multiple refs and the backref we were looking
1150 * for was not right next to it, so just unlock and go next,
1151 * we're good to go
1152 */
1153 list_del_init(&op->list);
1154 unlock_extent(&info->extent_ins, op->bytenr,
1155 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1156 kfree(op);
1157 }
1158
1159 btrfs_release_path(extent_root, path);
1160 if (!list_empty(del_list))
1161 goto search;
1162
1163out:
1164 btrfs_free_path(path);
1165 return ret;
1166}
1167
1168static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1169 struct btrfs_root *root, u64 bytenr,
1170 u64 orig_parent, u64 parent,
1171 u64 orig_root, u64 ref_root,
1172 u64 orig_generation, u64 ref_generation,
1173 u64 owner_objectid)
1174{
1175 int ret;
1176 struct btrfs_root *extent_root = root->fs_info->extent_root;
1177 struct btrfs_path *path;
1178
1179 if (root == root->fs_info->extent_root) {
1180 struct pending_extent_op *extent_op;
1181 u64 num_bytes;
1182
1183 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
1184 num_bytes = btrfs_level_size(root, (int)owner_objectid);
1185 mutex_lock(&root->fs_info->extent_ins_mutex);
1186 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
1187 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
1188 u64 priv;
1189 ret = get_state_private(&root->fs_info->extent_ins,
1190 bytenr, &priv);
1191 BUG_ON(ret);
1192 extent_op = (struct pending_extent_op *)
1193 (unsigned long)priv;
1194 BUG_ON(extent_op->parent != orig_parent);
1195 BUG_ON(extent_op->generation != orig_generation);
1196
1197 extent_op->parent = parent;
1198 extent_op->generation = ref_generation;
1199 } else {
1200 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
1201 BUG_ON(!extent_op);
1202
1203 extent_op->type = PENDING_BACKREF_UPDATE;
1204 extent_op->bytenr = bytenr;
1205 extent_op->num_bytes = num_bytes;
1206 extent_op->parent = parent;
1207 extent_op->orig_parent = orig_parent;
1208 extent_op->generation = ref_generation;
1209 extent_op->orig_generation = orig_generation;
1210 extent_op->level = (int)owner_objectid;
1211 INIT_LIST_HEAD(&extent_op->list);
1212 extent_op->del = 0;
1213
1214 set_extent_bits(&root->fs_info->extent_ins,
1215 bytenr, bytenr + num_bytes - 1,
1216 EXTENT_WRITEBACK, GFP_NOFS);
1217 set_state_private(&root->fs_info->extent_ins,
1218 bytenr, (unsigned long)extent_op);
1219 }
1220 mutex_unlock(&root->fs_info->extent_ins_mutex);
1221 return 0;
1222 }
1223
1224 path = btrfs_alloc_path();
1225 if (!path)
1226 return -ENOMEM;
1227 ret = lookup_extent_backref(trans, extent_root, path,
1228 bytenr, orig_parent, orig_root,
1229 orig_generation, owner_objectid, 1);
1230 if (ret)
1231 goto out;
1232 ret = remove_extent_backref(trans, extent_root, path);
1233 if (ret)
1234 goto out;
1235 ret = insert_extent_backref(trans, extent_root, path, bytenr,
1236 parent, ref_root, ref_generation,
1237 owner_objectid);
1238 BUG_ON(ret);
1239 finish_current_insert(trans, extent_root, 0);
1240 del_pending_extents(trans, extent_root, 0);
1241out:
1242 btrfs_free_path(path);
1243 return ret;
1244}
1245
1246int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1247 struct btrfs_root *root, u64 bytenr,
1248 u64 orig_parent, u64 parent,
1249 u64 ref_root, u64 ref_generation,
1250 u64 owner_objectid)
1251{
1252 int ret;
1253 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1254 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1255 return 0;
1256 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
1257 parent, ref_root, ref_root,
1258 ref_generation, ref_generation,
1259 owner_objectid);
1260 return ret;
1261}
1262
1263static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1264 struct btrfs_root *root, u64 bytenr,
1265 u64 orig_parent, u64 parent,
1266 u64 orig_root, u64 ref_root,
1267 u64 orig_generation, u64 ref_generation,
1268 u64 owner_objectid)
1269{
1270 struct btrfs_path *path;
1271 int ret;
1272 struct btrfs_key key;
1273 struct extent_buffer *l;
1274 struct btrfs_extent_item *item;
1275 u32 refs;
1276
1277 path = btrfs_alloc_path();
1278 if (!path)
1279 return -ENOMEM;
1280
1281 path->reada = 1;
1282 key.objectid = bytenr;
1283 key.type = BTRFS_EXTENT_ITEM_KEY;
1284 key.offset = (u64)-1;
1285
1286 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1287 0, 1);
1288 if (ret < 0)
1289 return ret;
1290 BUG_ON(ret == 0 || path->slots[0] == 0);
1291
1292 path->slots[0]--;
1293 l = path->nodes[0];
1294
1295 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1296 if (key.objectid != bytenr) {
1297 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
1298 printk("wanted %Lu found %Lu\n", bytenr, key.objectid);
1299 BUG();
1300 }
1301 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
1302
1303 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1304 refs = btrfs_extent_refs(l, item);
1305 btrfs_set_extent_refs(l, item, refs + 1);
1306 btrfs_mark_buffer_dirty(path->nodes[0]);
1307
1308 btrfs_release_path(root->fs_info->extent_root, path);
1309
1310 path->reada = 1;
1311 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1312 path, bytenr, parent,
1313 ref_root, ref_generation,
1314 owner_objectid);
1315 BUG_ON(ret);
1316 finish_current_insert(trans, root->fs_info->extent_root, 0);
1317 del_pending_extents(trans, root->fs_info->extent_root, 0);
1318
1319 btrfs_free_path(path);
1320 return 0;
1321}
1322
1323int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1324 struct btrfs_root *root,
1325 u64 bytenr, u64 num_bytes, u64 parent,
1326 u64 ref_root, u64 ref_generation,
1327 u64 owner_objectid)
1328{
1329 int ret;
1330 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1331 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1332 return 0;
1333 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
1334 0, ref_root, 0, ref_generation,
1335 owner_objectid);
1336 return ret;
1337}
1338
1339int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
1340 struct btrfs_root *root)
1341{
1342 finish_current_insert(trans, root->fs_info->extent_root, 1);
1343 del_pending_extents(trans, root->fs_info->extent_root, 1);
1344 return 0;
1345}
1346
1347int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1348 struct btrfs_root *root, u64 bytenr,
1349 u64 num_bytes, u32 *refs)
1350{
1351 struct btrfs_path *path;
1352 int ret;
1353 struct btrfs_key key;
1354 struct extent_buffer *l;
1355 struct btrfs_extent_item *item;
1356
1357 WARN_ON(num_bytes < root->sectorsize);
1358 path = btrfs_alloc_path();
1359 path->reada = 1;
1360 key.objectid = bytenr;
1361 key.offset = num_bytes;
1362 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1363 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1364 0, 0);
1365 if (ret < 0)
1366 goto out;
1367 if (ret != 0) {
1368 btrfs_print_leaf(root, path->nodes[0]);
1369 printk("failed to find block number %Lu\n", bytenr);
1370 BUG();
1371 }
1372 l = path->nodes[0];
1373 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1374 *refs = btrfs_extent_refs(l, item);
1375out:
1376 btrfs_free_path(path);
1377 return 0;
1378}
1379
1380int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1381 struct btrfs_root *root, u64 bytenr)
1382{
1383 struct btrfs_root *extent_root = root->fs_info->extent_root;
1384 struct btrfs_path *path;
1385 struct extent_buffer *leaf;
1386 struct btrfs_extent_ref *ref_item;
1387 struct btrfs_key key;
1388 struct btrfs_key found_key;
1389 u64 ref_root;
1390 u64 last_snapshot;
1391 u32 nritems;
1392 int ret;
1393
1394 key.objectid = bytenr;
1395 key.offset = (u64)-1;
1396 key.type = BTRFS_EXTENT_ITEM_KEY;
1397
1398 path = btrfs_alloc_path();
1399 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1400 if (ret < 0)
1401 goto out;
1402 BUG_ON(ret == 0);
1403
1404 ret = -ENOENT;
1405 if (path->slots[0] == 0)
1406 goto out;
1407
1408 path->slots[0]--;
1409 leaf = path->nodes[0];
1410 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1411
1412 if (found_key.objectid != bytenr ||
1413 found_key.type != BTRFS_EXTENT_ITEM_KEY)
1414 goto out;
1415
1416 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1417 while (1) {
1418 leaf = path->nodes[0];
1419 nritems = btrfs_header_nritems(leaf);
1420 if (path->slots[0] >= nritems) {
1421 ret = btrfs_next_leaf(extent_root, path);
1422 if (ret < 0)
1423 goto out;
1424 if (ret == 0)
1425 continue;
1426 break;
1427 }
1428 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1429 if (found_key.objectid != bytenr)
1430 break;
1431
1432 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
1433 path->slots[0]++;
1434 continue;
1435 }
1436
1437 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1438 struct btrfs_extent_ref);
1439 ref_root = btrfs_ref_root(leaf, ref_item);
1440 if (ref_root != root->root_key.objectid &&
1441 ref_root != BTRFS_TREE_LOG_OBJECTID) {
1442 ret = 1;
1443 goto out;
1444 }
1445 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
1446 ret = 1;
1447 goto out;
1448 }
1449
1450 path->slots[0]++;
1451 }
1452 ret = 0;
1453out:
1454 btrfs_free_path(path);
1455 return ret;
1456}
1457
1458int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1459 struct extent_buffer *buf, u32 nr_extents)
1460{
1461 struct btrfs_key key;
1462 struct btrfs_file_extent_item *fi;
1463 u64 root_gen;
1464 u32 nritems;
1465 int i;
1466 int level;
1467 int ret = 0;
1468 int shared = 0;
1469
1470 if (!root->ref_cows)
1471 return 0;
1472
1473 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1474 shared = 0;
1475 root_gen = root->root_key.offset;
1476 } else {
1477 shared = 1;
1478 root_gen = trans->transid - 1;
1479 }
1480
1481 level = btrfs_header_level(buf);
1482 nritems = btrfs_header_nritems(buf);
1483
1484 if (level == 0) {
1485 struct btrfs_leaf_ref *ref;
1486 struct btrfs_extent_info *info;
1487
1488 ref = btrfs_alloc_leaf_ref(root, nr_extents);
1489 if (!ref) {
1490 ret = -ENOMEM;
1491 goto out;
1492 }
1493
1494 ref->root_gen = root_gen;
1495 ref->bytenr = buf->start;
1496 ref->owner = btrfs_header_owner(buf);
1497 ref->generation = btrfs_header_generation(buf);
1498 ref->nritems = nr_extents;
1499 info = ref->extents;
1500
1501 for (i = 0; nr_extents > 0 && i < nritems; i++) {
1502 u64 disk_bytenr;
1503 btrfs_item_key_to_cpu(buf, &key, i);
1504 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1505 continue;
1506 fi = btrfs_item_ptr(buf, i,
1507 struct btrfs_file_extent_item);
1508 if (btrfs_file_extent_type(buf, fi) ==
1509 BTRFS_FILE_EXTENT_INLINE)
1510 continue;
1511 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1512 if (disk_bytenr == 0)
1513 continue;
1514
1515 info->bytenr = disk_bytenr;
1516 info->num_bytes =
1517 btrfs_file_extent_disk_num_bytes(buf, fi);
1518 info->objectid = key.objectid;
1519 info->offset = key.offset;
1520 info++;
1521 }
1522
1523 ret = btrfs_add_leaf_ref(root, ref, shared);
1524 if (ret == -EEXIST && shared) {
1525 struct btrfs_leaf_ref *old;
1526 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1527 BUG_ON(!old);
1528 btrfs_remove_leaf_ref(root, old);
1529 btrfs_free_leaf_ref(root, old);
1530 ret = btrfs_add_leaf_ref(root, ref, shared);
1531 }
1532 WARN_ON(ret);
1533 btrfs_free_leaf_ref(root, ref);
1534 }
1535out:
1536 return ret;
1537}
1538
1539int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1540 struct extent_buffer *orig_buf, struct extent_buffer *buf,
1541 u32 *nr_extents)
1542{
1543 u64 bytenr;
1544 u64 ref_root;
1545 u64 orig_root;
1546 u64 ref_generation;
1547 u64 orig_generation;
1548 u32 nritems;
1549 u32 nr_file_extents = 0;
1550 struct btrfs_key key;
1551 struct btrfs_file_extent_item *fi;
1552 int i;
1553 int level;
1554 int ret = 0;
1555 int faili = 0;
1556 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1557 u64, u64, u64, u64, u64, u64, u64, u64);
1558
1559 ref_root = btrfs_header_owner(buf);
1560 ref_generation = btrfs_header_generation(buf);
1561 orig_root = btrfs_header_owner(orig_buf);
1562 orig_generation = btrfs_header_generation(orig_buf);
1563
1564 nritems = btrfs_header_nritems(buf);
1565 level = btrfs_header_level(buf);
1566
1567 if (root->ref_cows) {
1568 process_func = __btrfs_inc_extent_ref;
1569 } else {
1570 if (level == 0 &&
1571 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1572 goto out;
1573 if (level != 0 &&
1574 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1575 goto out;
1576 process_func = __btrfs_update_extent_ref;
1577 }
1578
1579 for (i = 0; i < nritems; i++) {
1580 cond_resched();
1581 if (level == 0) {
1582 btrfs_item_key_to_cpu(buf, &key, i);
1583 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1584 continue;
1585 fi = btrfs_item_ptr(buf, i,
1586 struct btrfs_file_extent_item);
1587 if (btrfs_file_extent_type(buf, fi) ==
1588 BTRFS_FILE_EXTENT_INLINE)
1589 continue;
1590 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1591 if (bytenr == 0)
1592 continue;
1593
1594 nr_file_extents++;
1595
1596 ret = process_func(trans, root, bytenr,
1597 orig_buf->start, buf->start,
1598 orig_root, ref_root,
1599 orig_generation, ref_generation,
1600 key.objectid);
1601
1602 if (ret) {
1603 faili = i;
1604 WARN_ON(1);
1605 goto fail;
1606 }
1607 } else {
1608 bytenr = btrfs_node_blockptr(buf, i);
1609 ret = process_func(trans, root, bytenr,
1610 orig_buf->start, buf->start,
1611 orig_root, ref_root,
1612 orig_generation, ref_generation,
1613 level - 1);
1614 if (ret) {
1615 faili = i;
1616 WARN_ON(1);
1617 goto fail;
1618 }
1619 }
1620 }
1621out:
1622 if (nr_extents) {
1623 if (level == 0)
1624 *nr_extents = nr_file_extents;
1625 else
1626 *nr_extents = nritems;
1627 }
1628 return 0;
1629fail:
1630 WARN_ON(1);
1631 return ret;
1632}
1633
1634int btrfs_update_ref(struct btrfs_trans_handle *trans,
1635 struct btrfs_root *root, struct extent_buffer *orig_buf,
1636 struct extent_buffer *buf, int start_slot, int nr)
1637
1638{
1639 u64 bytenr;
1640 u64 ref_root;
1641 u64 orig_root;
1642 u64 ref_generation;
1643 u64 orig_generation;
1644 struct btrfs_key key;
1645 struct btrfs_file_extent_item *fi;
1646 int i;
1647 int ret;
1648 int slot;
1649 int level;
1650
1651 BUG_ON(start_slot < 0);
1652 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1653
1654 ref_root = btrfs_header_owner(buf);
1655 ref_generation = btrfs_header_generation(buf);
1656 orig_root = btrfs_header_owner(orig_buf);
1657 orig_generation = btrfs_header_generation(orig_buf);
1658 level = btrfs_header_level(buf);
1659
1660 if (!root->ref_cows) {
1661 if (level == 0 &&
1662 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1663 return 0;
1664 if (level != 0 &&
1665 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1666 return 0;
1667 }
1668
1669 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1670 cond_resched();
1671 if (level == 0) {
1672 btrfs_item_key_to_cpu(buf, &key, slot);
1673 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1674 continue;
1675 fi = btrfs_item_ptr(buf, slot,
1676 struct btrfs_file_extent_item);
1677 if (btrfs_file_extent_type(buf, fi) ==
1678 BTRFS_FILE_EXTENT_INLINE)
1679 continue;
1680 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1681 if (bytenr == 0)
1682 continue;
1683 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1684 orig_buf->start, buf->start,
1685 orig_root, ref_root,
1686 orig_generation, ref_generation,
1687 key.objectid);
1688 if (ret)
1689 goto fail;
1690 } else {
1691 bytenr = btrfs_node_blockptr(buf, slot);
1692 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1693 orig_buf->start, buf->start,
1694 orig_root, ref_root,
1695 orig_generation, ref_generation,
1696 level - 1);
1697 if (ret)
1698 goto fail;
1699 }
1700 }
1701 return 0;
1702fail:
1703 WARN_ON(1);
1704 return -1;
1705}
1706
1707static int write_one_cache_group(struct btrfs_trans_handle *trans,
1708 struct btrfs_root *root,
1709 struct btrfs_path *path,
1710 struct btrfs_block_group_cache *cache)
1711{
1712 int ret;
1713 int pending_ret;
1714 struct btrfs_root *extent_root = root->fs_info->extent_root;
1715 unsigned long bi;
1716 struct extent_buffer *leaf;
1717
1718 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1719 if (ret < 0)
1720 goto fail;
1721 BUG_ON(ret);
1722
1723 leaf = path->nodes[0];
1724 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1725 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1726 btrfs_mark_buffer_dirty(leaf);
1727 btrfs_release_path(extent_root, path);
1728fail:
1729 finish_current_insert(trans, extent_root, 0);
1730 pending_ret = del_pending_extents(trans, extent_root, 0);
1731 if (ret)
1732 return ret;
1733 if (pending_ret)
1734 return pending_ret;
1735 return 0;
1736
1737}
1738
1739int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1740 struct btrfs_root *root)
1741{
1742 struct btrfs_block_group_cache *cache, *entry;
1743 struct rb_node *n;
1744 int err = 0;
1745 int werr = 0;
1746 struct btrfs_path *path;
1747 u64 last = 0;
1748
1749 path = btrfs_alloc_path();
1750 if (!path)
1751 return -ENOMEM;
1752
1753 while (1) {
1754 cache = NULL;
1755 spin_lock(&root->fs_info->block_group_cache_lock);
1756 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1757 n; n = rb_next(n)) {
1758 entry = rb_entry(n, struct btrfs_block_group_cache,
1759 cache_node);
1760 if (entry->dirty) {
1761 cache = entry;
1762 break;
1763 }
1764 }
1765 spin_unlock(&root->fs_info->block_group_cache_lock);
1766
1767 if (!cache)
1768 break;
1769
1770 cache->dirty = 0;
1771 last += cache->key.offset;
1772
1773 err = write_one_cache_group(trans, root,
1774 path, cache);
1775 /*
1776 * if we fail to write the cache group, we want
1777 * to keep it marked dirty in hopes that a later
1778 * write will work
1779 */
1780 if (err) {
1781 werr = err;
1782 continue;
1783 }
1784 }
1785 btrfs_free_path(path);
1786 return werr;
1787}
1788
1789static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1790 u64 total_bytes, u64 bytes_used,
1791 struct btrfs_space_info **space_info)
1792{
1793 struct btrfs_space_info *found;
1794
1795 found = __find_space_info(info, flags);
1796 if (found) {
1797 spin_lock(&found->lock);
1798 found->total_bytes += total_bytes;
1799 found->bytes_used += bytes_used;
1800 found->full = 0;
1801 spin_unlock(&found->lock);
1802 *space_info = found;
1803 return 0;
1804 }
1805 found = kzalloc(sizeof(*found), GFP_NOFS);
1806 if (!found)
1807 return -ENOMEM;
1808
1809 list_add(&found->list, &info->space_info);
1810 INIT_LIST_HEAD(&found->block_groups);
1811 init_rwsem(&found->groups_sem);
1812 spin_lock_init(&found->lock);
1813 found->flags = flags;
1814 found->total_bytes = total_bytes;
1815 found->bytes_used = bytes_used;
1816 found->bytes_pinned = 0;
1817 found->bytes_reserved = 0;
1818 found->bytes_readonly = 0;
1819 found->full = 0;
1820 found->force_alloc = 0;
1821 *space_info = found;
1822 return 0;
1823}
1824
1825static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1826{
1827 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1828 BTRFS_BLOCK_GROUP_RAID1 |
1829 BTRFS_BLOCK_GROUP_RAID10 |
1830 BTRFS_BLOCK_GROUP_DUP);
1831 if (extra_flags) {
1832 if (flags & BTRFS_BLOCK_GROUP_DATA)
1833 fs_info->avail_data_alloc_bits |= extra_flags;
1834 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1835 fs_info->avail_metadata_alloc_bits |= extra_flags;
1836 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1837 fs_info->avail_system_alloc_bits |= extra_flags;
1838 }
1839}
1840
1841static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1842{
1843 spin_lock(&cache->space_info->lock);
1844 spin_lock(&cache->lock);
1845 if (!cache->ro) {
1846 cache->space_info->bytes_readonly += cache->key.offset -
1847 btrfs_block_group_used(&cache->item);
1848 cache->ro = 1;
1849 }
1850 spin_unlock(&cache->lock);
1851 spin_unlock(&cache->space_info->lock);
1852}
1853
1854u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1855{
1856 u64 num_devices = root->fs_info->fs_devices->rw_devices;
1857
1858 if (num_devices == 1)
1859 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1860 if (num_devices < 4)
1861 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1862
1863 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1864 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1865 BTRFS_BLOCK_GROUP_RAID10))) {
1866 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1867 }
1868
1869 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1870 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1871 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1872 }
1873
1874 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1875 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1876 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1877 (flags & BTRFS_BLOCK_GROUP_DUP)))
1878 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1879 return flags;
1880}
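
/*
 * Example reductions (editor's illustration): with a single rw device
 * the RAID1 and RAID0 bits are stripped, so (DATA | RAID1) falls back to
 * plain DATA; fewer than four devices drops RAID10; and contradictory
 * mixes such as DUP with RAID1 or RAID10 lose the DUP bit.
 */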
1881
1882static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1883 struct btrfs_root *extent_root, u64 alloc_bytes,
1884 u64 flags, int force)
1885{
1886 struct btrfs_space_info *space_info;
1887 u64 thresh;
1888 int ret = 0;
1889
1890 mutex_lock(&extent_root->fs_info->chunk_mutex);
1891
1892 flags = btrfs_reduce_alloc_profile(extent_root, flags);
1893
1894 space_info = __find_space_info(extent_root->fs_info, flags);
1895 if (!space_info) {
1896 ret = update_space_info(extent_root->fs_info, flags,
1897 0, 0, &space_info);
1898 BUG_ON(ret);
1899 }
1900 BUG_ON(!space_info);
1901
1902 spin_lock(&space_info->lock);
1903 if (space_info->force_alloc) {
1904 force = 1;
1905 space_info->force_alloc = 0;
1906 }
1907 if (space_info->full) {
1908 spin_unlock(&space_info->lock);
1909 goto out;
1910 }
1911
1912 thresh = space_info->total_bytes - space_info->bytes_readonly;
1913 thresh = div_factor(thresh, 6);
1914 if (!force &&
1915 (space_info->bytes_used + space_info->bytes_pinned +
1916 space_info->bytes_reserved + alloc_bytes) < thresh) {
1917 spin_unlock(&space_info->lock);
1918 goto out;
1919 }
1920 spin_unlock(&space_info->lock);
1921
1922 ret = btrfs_alloc_chunk(trans, extent_root, flags);
1923 if (ret) {
1924printk("space info full %Lu\n", flags);
1925 space_info->full = 1;
1926 }
1927out:
1928 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1929 return ret;
1930}
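
/*
 * Threshold example (editor's illustration, hypothetical sizes): with
 * 10G total, nothing read-only and force == 0, thresh works out to
 * div_factor(10G, 6) == 6G, so a new chunk is only allocated once
 * used + pinned + reserved plus the current request reaches 6G.
 */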
1931
1932static int update_block_group(struct btrfs_trans_handle *trans,
1933 struct btrfs_root *root,
1934 u64 bytenr, u64 num_bytes, int alloc,
1935 int mark_free)
1936{
1937 struct btrfs_block_group_cache *cache;
1938 struct btrfs_fs_info *info = root->fs_info;
1939 u64 total = num_bytes;
1940 u64 old_val;
1941 u64 byte_in_group;
1942
1943 while (total) {
1944 cache = btrfs_lookup_block_group(info, bytenr);
1945 if (!cache)
1946 return -1;
1947 byte_in_group = bytenr - cache->key.objectid;
1948 WARN_ON(byte_in_group > cache->key.offset);
1949
1950 spin_lock(&cache->space_info->lock);
1951 spin_lock(&cache->lock);
1952 cache->dirty = 1;
1953 old_val = btrfs_block_group_used(&cache->item);
1954 num_bytes = min(total, cache->key.offset - byte_in_group);
1955 if (alloc) {
1956 old_val += num_bytes;
1957 cache->space_info->bytes_used += num_bytes;
1958 if (cache->ro) {
1959 cache->space_info->bytes_readonly -= num_bytes;
1960 WARN_ON(1);
1961 }
1962 btrfs_set_block_group_used(&cache->item, old_val);
1963 spin_unlock(&cache->lock);
1964 spin_unlock(&cache->space_info->lock);
1965 } else {
1966 old_val -= num_bytes;
1967 cache->space_info->bytes_used -= num_bytes;
1968 if (cache->ro)
1969 cache->space_info->bytes_readonly += num_bytes;
1970 btrfs_set_block_group_used(&cache->item, old_val);
1971 spin_unlock(&cache->lock);
1972 spin_unlock(&cache->space_info->lock);
1973 if (mark_free) {
1974 int ret;
1975 ret = btrfs_add_free_space(cache, bytenr,
1976 num_bytes);
1977 if (ret)
1978 return -1;
1979 }
1980 }
1981 total -= num_bytes;
1982 bytenr += num_bytes;
1983 }
1984 return 0;
1985}
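
/*
 * Sketch of the splitting loop above (hypothetical numbers): freeing 8K
 * that starts 4K before the end of a block group is handled as two
 * iterations, 4K accounted to the first group and 4K to the next, with
 * num_bytes clamped to cache->key.offset - byte_in_group each time.
 */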
1986
1987static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1988{
1989 struct btrfs_block_group_cache *cache;
1990
1991 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
1992 if (!cache)
1993 return 0;
1994
1995 return cache->key.objectid;
1996}
1997
1998int btrfs_update_pinned_extents(struct btrfs_root *root,
1999 u64 bytenr, u64 num, int pin)
2000{
2001 u64 len;
2002 struct btrfs_block_group_cache *cache;
2003 struct btrfs_fs_info *fs_info = root->fs_info;
2004
2005 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
2006 if (pin) {
2007 set_extent_dirty(&fs_info->pinned_extents,
2008 bytenr, bytenr + num - 1, GFP_NOFS);
2009 } else {
2010 clear_extent_dirty(&fs_info->pinned_extents,
2011 bytenr, bytenr + num - 1, GFP_NOFS);
2012 }
2013 while (num > 0) {
2014 cache = btrfs_lookup_block_group(fs_info, bytenr);
2015 BUG_ON(!cache);
2016 len = min(num, cache->key.offset -
2017 (bytenr - cache->key.objectid));
2018 if (pin) {
2019 spin_lock(&cache->space_info->lock);
2020 spin_lock(&cache->lock);
2021 cache->pinned += len;
2022 cache->space_info->bytes_pinned += len;
2023 spin_unlock(&cache->lock);
2024 spin_unlock(&cache->space_info->lock);
2025 fs_info->total_pinned += len;
2026 } else {
2027 spin_lock(&cache->space_info->lock);
2028 spin_lock(&cache->lock);
2029 cache->pinned -= len;
2030 cache->space_info->bytes_pinned -= len;
2031 spin_unlock(&cache->lock);
2032 spin_unlock(&cache->space_info->lock);
2033 fs_info->total_pinned -= len;
2034 }
2035 bytenr += len;
2036 num -= len;
2037 }
2038 return 0;
2039}
2040
2041static int update_reserved_extents(struct btrfs_root *root,
2042 u64 bytenr, u64 num, int reserve)
2043{
2044 u64 len;
2045 struct btrfs_block_group_cache *cache;
2046 struct btrfs_fs_info *fs_info = root->fs_info;
2047
2048 while (num > 0) {
2049 cache = btrfs_lookup_block_group(fs_info, bytenr);
2050 BUG_ON(!cache);
2051 len = min(num, cache->key.offset -
2052 (bytenr - cache->key.objectid));
2053
2054 spin_lock(&cache->space_info->lock);
2055 spin_lock(&cache->lock);
2056 if (reserve) {
2057 cache->reserved += len;
2058 cache->space_info->bytes_reserved += len;
2059 } else {
2060 cache->reserved -= len;
2061 cache->space_info->bytes_reserved -= len;
2062 }
2063 spin_unlock(&cache->lock);
2064 spin_unlock(&cache->space_info->lock);
2065 bytenr += len;
2066 num -= len;
2067 }
2068 return 0;
2069}
2070
2071int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2072{
2073 u64 last = 0;
2074 u64 start;
2075 u64 end;
2076 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2077 int ret;
2078
2079 mutex_lock(&root->fs_info->pinned_mutex);
2080 while(1) {
2081 ret = find_first_extent_bit(pinned_extents, last,
2082 &start, &end, EXTENT_DIRTY);
2083 if (ret)
2084 break;
2085 set_extent_dirty(copy, start, end, GFP_NOFS);
2086 last = end + 1;
2087 }
2088 mutex_unlock(&root->fs_info->pinned_mutex);
2089 return 0;
2090}
2091
2092int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2093 struct btrfs_root *root,
2094 struct extent_io_tree *unpin)
2095{
2096 u64 start;
2097 u64 end;
2098 int ret;
2099 struct btrfs_block_group_cache *cache;
2100
2101 mutex_lock(&root->fs_info->pinned_mutex);
2102 while(1) {
2103 ret = find_first_extent_bit(unpin, 0, &start, &end,
2104 EXTENT_DIRTY);
2105 if (ret)
2106 break;
2107 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2108 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2109 cache = btrfs_lookup_block_group(root->fs_info, start);
2110 if (cache->cached)
2111 btrfs_add_free_space(cache, start, end - start + 1);
2112 if (need_resched()) {
2113 mutex_unlock(&root->fs_info->pinned_mutex);
2114 cond_resched();
2115 mutex_lock(&root->fs_info->pinned_mutex);
2116 }
2117 }
2118 mutex_unlock(&root->fs_info->pinned_mutex);
2119 return 0;
2120}
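
/*
 * Note on the loop above: unpinned space is only handed back to the
 * free-space cache when the block group has already been cached;
 * uncached groups pick the space up later when cache_block_group()
 * rebuilds their free-space entries from the extent tree.
 */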
2121
2122static int finish_current_insert(struct btrfs_trans_handle *trans,
2123 struct btrfs_root *extent_root, int all)
2124{
2125 u64 start;
2126 u64 end;
2127 u64 priv;
2128 u64 search = 0;
2129 u64 skipped = 0;
2130 struct btrfs_fs_info *info = extent_root->fs_info;
2131 struct btrfs_path *path;
2132 struct pending_extent_op *extent_op, *tmp;
2133 struct list_head insert_list, update_list;
2134 int ret;
2135 int num_inserts = 0, max_inserts;
2136
2137 path = btrfs_alloc_path();
2138 INIT_LIST_HEAD(&insert_list);
2139 INIT_LIST_HEAD(&update_list);
2140
2141 max_inserts = extent_root->leafsize /
2142 (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
2143 sizeof(struct btrfs_extent_ref) +
2144 sizeof(struct btrfs_extent_item));
2145again:
2146 mutex_lock(&info->extent_ins_mutex);
2147 while (1) {
2148 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2149 &end, EXTENT_WRITEBACK);
2150 if (ret) {
2151 if (skipped && all && !num_inserts) {
2152 skipped = 0;
2153 continue;
2154 }
2155 mutex_unlock(&info->extent_ins_mutex);
2156 break;
2157 }
2158
2159 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
2160 if (!ret) {
2161 skipped = 1;
2162 search = end + 1;
2163 if (need_resched()) {
2164 mutex_unlock(&info->extent_ins_mutex);
2165 cond_resched();
2166 mutex_lock(&info->extent_ins_mutex);
2167 }
2168 continue;
2169 }
2170
2171 ret = get_state_private(&info->extent_ins, start, &priv);
2172 BUG_ON(ret);
2173 extent_op = (struct pending_extent_op *)(unsigned long) priv;
2174
2175 if (extent_op->type == PENDING_EXTENT_INSERT) {
2176 num_inserts++;
2177 list_add_tail(&extent_op->list, &insert_list);
2178 search = end + 1;
2179 if (num_inserts == max_inserts) {
2180 mutex_unlock(&info->extent_ins_mutex);
2181 break;
2182 }
2183 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
2184 list_add_tail(&extent_op->list, &update_list);
2185 search = end + 1;
2186 } else {
2187 BUG();
2188 }
2189 }
2190
2191 /*
2192 * process the update list, clear the writeback bit for it, and if
2193 * somebody marked this thing for deletion then just unlock it and be
2194 * done, the free_extents will handle it
2195 */
2196 mutex_lock(&info->extent_ins_mutex);
2197 list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
2198 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2199 extent_op->bytenr + extent_op->num_bytes - 1,
2200 EXTENT_WRITEBACK, GFP_NOFS);
2201 if (extent_op->del) {
2202 list_del_init(&extent_op->list);
2203 unlock_extent(&info->extent_ins, extent_op->bytenr,
2204 extent_op->bytenr + extent_op->num_bytes
2205 - 1, GFP_NOFS);
2206 kfree(extent_op);
2207 }
2208 }
2209 mutex_unlock(&info->extent_ins_mutex);
2210
2211 /*
2212 * still have things left on the update list, go ahead and update
2213 * everything
2214 */
2215 if (!list_empty(&update_list)) {
2216 ret = update_backrefs(trans, extent_root, path, &update_list);
2217 BUG_ON(ret);
2218 }
2219
2220 /*
2221 * if no inserts need to be done, but we skipped some extents and we
2222 * need to make sure everything is cleaned then reset everything and
2223 * go back to the beginning
2224 */
2225 if (!num_inserts && all && skipped) {
2226 search = 0;
2227 skipped = 0;
2228 INIT_LIST_HEAD(&update_list);
2229 INIT_LIST_HEAD(&insert_list);
2230 goto again;
2231 } else if (!num_inserts) {
2232 goto out;
2233 }
2234
2235 /*
2236 * process the insert extents list. Again if we are deleting this
2237 * extent, then just unlock it, pin down the bytes if need be, and be
2238 * done with it. Saves us from having to actually insert the extent
2239 * into the tree and then subsequently come along and delete it
2240 */
2241 mutex_lock(&info->extent_ins_mutex);
2242 list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
2243 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2244 extent_op->bytenr + extent_op->num_bytes - 1,
2245 EXTENT_WRITEBACK, GFP_NOFS);
2246 if (extent_op->del) {
2247 list_del_init(&extent_op->list);
2248 unlock_extent(&info->extent_ins, extent_op->bytenr,
2249 extent_op->bytenr + extent_op->num_bytes
2250 - 1, GFP_NOFS);
2251
2252 mutex_lock(&extent_root->fs_info->pinned_mutex);
2253 ret = pin_down_bytes(trans, extent_root,
2254 extent_op->bytenr,
2255 extent_op->num_bytes, 0);
2256 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2257
2258 ret = update_block_group(trans, extent_root,
2259 extent_op->bytenr,
2260 extent_op->num_bytes,
2261 0, ret > 0);
2262 BUG_ON(ret);
2263 kfree(extent_op);
2264 num_inserts--;
2265 }
2266 }
2267 mutex_unlock(&info->extent_ins_mutex);
2268
2269 ret = insert_extents(trans, extent_root, path, &insert_list,
2270 num_inserts);
2271 BUG_ON(ret);
2272
2273 /*
2274 * if we broke out of the loop in order to insert stuff because we hit
2275 * the maximum number of inserts at a time we can handle, then loop
2276 * back and pick up where we left off
2277 */
2278 if (num_inserts == max_inserts) {
2279 INIT_LIST_HEAD(&insert_list);
2280 INIT_LIST_HEAD(&update_list);
2281 num_inserts = 0;
2282 goto again;
2283 }
2284
2285 /*
2286 * again, if we need to make absolutely sure there are no more pending
2287 * extent operations left and we know that we skipped some, go back to
2288 * the beginning and do it all again
2289 */
2290 if (all && skipped) {
2291 INIT_LIST_HEAD(&insert_list);
2292 INIT_LIST_HEAD(&update_list);
2293 search = 0;
2294 skipped = 0;
2295 num_inserts = 0;
2296 goto again;
2297 }
2298out:
2299 btrfs_free_path(path);
2300 return 0;
2301}
2302
2303static int pin_down_bytes(struct btrfs_trans_handle *trans,
2304 struct btrfs_root *root,
2305 u64 bytenr, u64 num_bytes, int is_data)
2306{
2307 int err = 0;
2308 struct extent_buffer *buf;
2309
2310 if (is_data)
2311 goto pinit;
2312
2313 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2314 if (!buf)
2315 goto pinit;
2316
2317 /* we can reuse a block if it hasn't been written
2318 * and it is from this transaction. We can't
2319 * reuse anything from the tree log root because
2320 * it has tiny sub-transactions.
2321 */
2322 if (btrfs_buffer_uptodate(buf, 0) &&
2323 btrfs_try_tree_lock(buf)) {
2324 u64 header_owner = btrfs_header_owner(buf);
2325 u64 header_transid = btrfs_header_generation(buf);
2326 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2327 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2328 header_transid == trans->transid &&
2329 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2330 clean_tree_block(NULL, root, buf);
2331 btrfs_tree_unlock(buf);
2332 free_extent_buffer(buf);
2333 return 1;
2334 }
2335 btrfs_tree_unlock(buf);
2336 }
2337 free_extent_buffer(buf);
2338pinit:
2339 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2340
2341 BUG_ON(err < 0);
2342 return 0;
2343}
2344
2345/*
2346 * remove an extent from the root, returns 0 on success
2347 */
2348static int __free_extent(struct btrfs_trans_handle *trans,
2349 struct btrfs_root *root,
2350 u64 bytenr, u64 num_bytes, u64 parent,
2351 u64 root_objectid, u64 ref_generation,
2352 u64 owner_objectid, int pin, int mark_free)
2353{
2354 struct btrfs_path *path;
2355 struct btrfs_key key;
2356 struct btrfs_fs_info *info = root->fs_info;
2357 struct btrfs_root *extent_root = info->extent_root;
2358 struct extent_buffer *leaf;
2359 int ret;
2360 int extent_slot = 0;
2361 int found_extent = 0;
2362 int num_to_del = 1;
2363 struct btrfs_extent_item *ei;
2364 u32 refs;
2365
2366 key.objectid = bytenr;
2367 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2368 key.offset = num_bytes;
2369 path = btrfs_alloc_path();
2370 if (!path)
2371 return -ENOMEM;
2372
2373 path->reada = 1;
2374 ret = lookup_extent_backref(trans, extent_root, path,
2375 bytenr, parent, root_objectid,
2376 ref_generation, owner_objectid, 1);
2377 if (ret == 0) {
2378 struct btrfs_key found_key;
2379 extent_slot = path->slots[0];
2380 while(extent_slot > 0) {
2381 extent_slot--;
2382 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2383 extent_slot);
2384 if (found_key.objectid != bytenr)
2385 break;
2386 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2387 found_key.offset == num_bytes) {
2388 found_extent = 1;
2389 break;
2390 }
2391 if (path->slots[0] - extent_slot > 5)
2392 break;
2393 }
2394 if (!found_extent) {
2395 ret = remove_extent_backref(trans, extent_root, path);
2396 BUG_ON(ret);
2397 btrfs_release_path(extent_root, path);
2398 ret = btrfs_search_slot(trans, extent_root,
2399 &key, path, -1, 1);
2400 if (ret) {
2401 printk(KERN_ERR "umm, got %d back from search"
2402 ", was looking for %Lu\n", ret,
2403 bytenr);
2404 btrfs_print_leaf(extent_root, path->nodes[0]);
2405 }
2406 BUG_ON(ret);
2407 extent_slot = path->slots[0];
2408 }
2409 } else {
2410 btrfs_print_leaf(extent_root, path->nodes[0]);
2411 WARN_ON(1);
2412 printk("Unable to find ref byte nr %Lu root %Lu "
2413 "gen %Lu owner %Lu\n", bytenr,
2414 root_objectid, ref_generation, owner_objectid);
2415 }
2416
2417 leaf = path->nodes[0];
2418 ei = btrfs_item_ptr(leaf, extent_slot,
2419 struct btrfs_extent_item);
2420 refs = btrfs_extent_refs(leaf, ei);
2421 BUG_ON(refs == 0);
2422 refs -= 1;
2423 btrfs_set_extent_refs(leaf, ei, refs);
2424
2425 btrfs_mark_buffer_dirty(leaf);
2426
2427 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
2428 struct btrfs_extent_ref *ref;
2429 ref = btrfs_item_ptr(leaf, path->slots[0],
2430 struct btrfs_extent_ref);
2431 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
2432 /* if the back ref and the extent are next to each other
2433 * they get deleted below in one shot
2434 */
2435 path->slots[0] = extent_slot;
2436 num_to_del = 2;
2437 } else if (found_extent) {
2438 /* otherwise delete the extent back ref */
2439 ret = remove_extent_backref(trans, extent_root, path);
2440 BUG_ON(ret);
2441 /* if refs are 0, we need to setup the path for deletion */
2442 if (refs == 0) {
2443 btrfs_release_path(extent_root, path);
2444 ret = btrfs_search_slot(trans, extent_root, &key, path,
2445 -1, 1);
2446 BUG_ON(ret);
2447 }
2448 }
2449
2450 if (refs == 0) {
2451 u64 super_used;
2452 u64 root_used;
2453#ifdef BIO_RW_DISCARD
2454 u64 map_length = num_bytes;
2455 struct btrfs_multi_bio *multi = NULL;
2456#endif
2457
2458 if (pin) {
2459 mutex_lock(&root->fs_info->pinned_mutex);
2460 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
2461 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
2462 mutex_unlock(&root->fs_info->pinned_mutex);
2463 if (ret > 0)
2464 mark_free = 1;
2465 BUG_ON(ret < 0);
2466 }
2467
2468 /* block accounting for super block */
2469 spin_lock_irq(&info->delalloc_lock);
2470 super_used = btrfs_super_bytes_used(&info->super_copy);
2471 btrfs_set_super_bytes_used(&info->super_copy,
2472 super_used - num_bytes);
2473 spin_unlock_irq(&info->delalloc_lock);
2474
2475 /* block accounting for root item */
2476 root_used = btrfs_root_used(&root->root_item);
2477 btrfs_set_root_used(&root->root_item,
2478 root_used - num_bytes);
2479 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2480 num_to_del);
2481 BUG_ON(ret);
2482 btrfs_release_path(extent_root, path);
2483 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2484 mark_free);
2485 BUG_ON(ret);
2486
2487#ifdef BIO_RW_DISCARD
2488 /* Tell the block device(s) that the sectors can be discarded */
2489 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2490 bytenr, &map_length, &multi, 0);
2491 if (!ret) {
2492 struct btrfs_bio_stripe *stripe = multi->stripes;
2493 int i;
2494
2495 if (map_length > num_bytes)
2496 map_length = num_bytes;
2497
2498 for (i = 0; i < multi->num_stripes; i++, stripe++) {
2499 blkdev_issue_discard(stripe->dev->bdev,
2500 stripe->physical >> 9,
2501 map_length >> 9);
2502 }
2503 kfree(multi);
2504 }
2505#endif
2506 }
2507 btrfs_free_path(path);
2508 finish_current_insert(trans, extent_root, 0);
2509 return ret;
2510}
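
/*
 * Note on the discard branch above: blkdev_issue_discard() takes its
 * start and length in 512-byte sectors, hence the >> 9 applied to the
 * stripe's physical offset and to map_length before issuing the request.
 */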
2511
2512/*
2513 * find all the blocks marked as pending deletion in the pending_del
2514 * tree and remove them from the extent map
2515 */
2516static int del_pending_extents(struct btrfs_trans_handle *trans, struct
2517 btrfs_root *extent_root, int all)
2518{
2519 int ret;
2520 int err = 0;
2521 u64 start;
2522 u64 end;
2523 u64 priv;
2524 u64 search = 0;
2525 int nr = 0, skipped = 0;
2526 struct extent_io_tree *pending_del;
2527 struct extent_io_tree *extent_ins;
2528 struct pending_extent_op *extent_op;
2529 struct btrfs_fs_info *info = extent_root->fs_info;
2530 struct list_head delete_list;
2531
2532 INIT_LIST_HEAD(&delete_list);
2533 extent_ins = &extent_root->fs_info->extent_ins;
2534 pending_del = &extent_root->fs_info->pending_del;
2535
2536again:
2537 mutex_lock(&info->extent_ins_mutex);
2538 while(1) {
2539 ret = find_first_extent_bit(pending_del, search, &start, &end,
2540 EXTENT_WRITEBACK);
2541 if (ret) {
2542 if (all && skipped && !nr) {
2543 search = 0;
2544 continue;
2545 }
2546 mutex_unlock(&info->extent_ins_mutex);
2547 break;
2548 }
2549
2550 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2551 if (!ret) {
2552 search = end + 1;
2553 skipped = 1;
2554
2555 if (need_resched()) {
2556 mutex_unlock(&info->extent_ins_mutex);
2557 cond_resched();
2558 mutex_lock(&info->extent_ins_mutex);
2559 }
2560
2561 continue;
2562 }
2563 BUG_ON(ret < 0);
2564
2565 ret = get_state_private(pending_del, start, &priv);
2566 BUG_ON(ret);
2567 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2568
2569 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2570 GFP_NOFS);
2571 if (!test_range_bit(extent_ins, start, end,
2572 EXTENT_WRITEBACK, 0)) {
2573 list_add_tail(&extent_op->list, &delete_list);
2574 nr++;
2575 } else {
2576 kfree(extent_op);
2577
2578 ret = get_state_private(&info->extent_ins, start,
2579 &priv);
2580 BUG_ON(ret);
2581 extent_op = (struct pending_extent_op *)
2582 (unsigned long)priv;
2583
2584 clear_extent_bits(&info->extent_ins, start, end,
2585 EXTENT_WRITEBACK, GFP_NOFS);
2586
2587 if (extent_op->type == PENDING_BACKREF_UPDATE) {
2588 list_add_tail(&extent_op->list, &delete_list);
2589 search = end + 1;
2590 nr++;
2591 continue;
2592 }
2593
2594 mutex_lock(&extent_root->fs_info->pinned_mutex);
2595 ret = pin_down_bytes(trans, extent_root, start,
2596 end + 1 - start, 0);
2597 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2598
2599 ret = update_block_group(trans, extent_root, start,
2600 end + 1 - start, 0, ret > 0);
2601
2602 unlock_extent(extent_ins, start, end, GFP_NOFS);
2603 BUG_ON(ret);
2604 kfree(extent_op);
2605 }
2606 if (ret)
2607 err = ret;
2608
2609 search = end + 1;
2610
2611 if (need_resched()) {
2612 mutex_unlock(&info->extent_ins_mutex);
2613 cond_resched();
2614 mutex_lock(&info->extent_ins_mutex);
2615 }
2616 }
2617
2618 if (nr) {
2619 ret = free_extents(trans, extent_root, &delete_list);
2620 BUG_ON(ret);
2621 }
2622
2623 if (all && skipped) {
2624 INIT_LIST_HEAD(&delete_list);
2625 search = 0;
2626 nr = 0;
2627 goto again;
2628 }
2629
2630 return err;
2631}
2632
2633/*
2634 * remove an extent from the root, returns 0 on success
2635 */
2636static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2637 struct btrfs_root *root,
2638 u64 bytenr, u64 num_bytes, u64 parent,
2639 u64 root_objectid, u64 ref_generation,
2640 u64 owner_objectid, int pin)
2641{
2642 struct btrfs_root *extent_root = root->fs_info->extent_root;
2643 int pending_ret;
2644 int ret;
2645
2646 WARN_ON(num_bytes < root->sectorsize);
2647 if (root == extent_root) {
2648 struct pending_extent_op *extent_op = NULL;
2649
2650 mutex_lock(&root->fs_info->extent_ins_mutex);
2651 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
2652 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
2653 u64 priv;
2654 ret = get_state_private(&root->fs_info->extent_ins,
2655 bytenr, &priv);
2656 BUG_ON(ret);
2657 extent_op = (struct pending_extent_op *)
2658 (unsigned long)priv;
2659
2660 extent_op->del = 1;
2661 if (extent_op->type == PENDING_EXTENT_INSERT) {
2662 mutex_unlock(&root->fs_info->extent_ins_mutex);
2663 return 0;
2664 }
2665 }
2666
2667 if (extent_op) {
2668 ref_generation = extent_op->orig_generation;
2669 parent = extent_op->orig_parent;
2670 }
2671
2672 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2673 BUG_ON(!extent_op);
2674
2675 extent_op->type = PENDING_EXTENT_DELETE;
2676 extent_op->bytenr = bytenr;
2677 extent_op->num_bytes = num_bytes;
2678 extent_op->parent = parent;
2679 extent_op->orig_parent = parent;
2680 extent_op->generation = ref_generation;
2681 extent_op->orig_generation = ref_generation;
2682 extent_op->level = (int)owner_objectid;
2683 INIT_LIST_HEAD(&extent_op->list);
2684 extent_op->del = 0;
2685
2686 set_extent_bits(&root->fs_info->pending_del,
2687 bytenr, bytenr + num_bytes - 1,
2688 EXTENT_WRITEBACK, GFP_NOFS);
2689 set_state_private(&root->fs_info->pending_del,
2690 bytenr, (unsigned long)extent_op);
2691 mutex_unlock(&root->fs_info->extent_ins_mutex);
2692 return 0;
2693 }
2694 /* if metadata always pin */
2695 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2696 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2697 struct btrfs_block_group_cache *cache;
2698
2699 /* btrfs_free_reserved_extent */
2700 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2701 BUG_ON(!cache);
2702 btrfs_add_free_space(cache, bytenr, num_bytes);
2703 update_reserved_extents(root, bytenr, num_bytes, 0);
2704 return 0;
2705 }
2706 pin = 1;
2707 }
2708
2709 /* if data pin when any transaction has committed this */
2710 if (ref_generation != trans->transid)
2711 pin = 1;
2712
2713 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2714 root_objectid, ref_generation,
2715 owner_objectid, pin, pin == 0);
2716
2717 finish_current_insert(trans, root->fs_info->extent_root, 0);
2718 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2719 return ret ? ret : pending_ret;
2720}
2721
2722int btrfs_free_extent(struct btrfs_trans_handle *trans,
2723 struct btrfs_root *root,
2724 u64 bytenr, u64 num_bytes, u64 parent,
2725 u64 root_objectid, u64 ref_generation,
2726 u64 owner_objectid, int pin)
2727{
2728 int ret;
2729
2730 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2731 root_objectid, ref_generation,
2732 owner_objectid, pin);
2733 return ret;
2734}
2735
2736static u64 stripe_align(struct btrfs_root *root, u64 val)
2737{
2738 u64 mask = ((u64)root->stripesize - 1);
2739 u64 ret = (val + mask) & ~mask;
2740 return ret;
2741}
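
/*
 * Sketch (not built): stripe_align() is the usual round-up-to-power-
 * of-two mask trick. Assuming a 64K stripesize, mask is 0xffff, so
 * values already on a boundary are unchanged and anything else is
 * bumped to the next 64K multiple.
 */
#if 0
static void example_stripe_align(struct btrfs_root *root)
{
	u64 a = stripe_align(root, 1);		/* -> 65536 */
	u64 b = stripe_align(root, 65536);	/* -> 65536 */
	u64 c = stripe_align(root, 65537);	/* -> 131072 */
}
#endif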
2742
2743/*
2744 * walks the btree of allocated extents and finds a hole of a given size.
2745 * The key ins is changed to record the hole:
2746 * ins->objectid == block start
2747 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2748 * ins->offset == number of blocks
2749 * Any available blocks before search_start are skipped.
2750 */
2751static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2752 struct btrfs_root *orig_root,
2753 u64 num_bytes, u64 empty_size,
2754 u64 search_start, u64 search_end,
2755 u64 hint_byte, struct btrfs_key *ins,
2756 u64 exclude_start, u64 exclude_nr,
2757 int data)
2758{
2759 int ret = 0;
2760 struct btrfs_root * root = orig_root->fs_info->extent_root;
2761 u64 total_needed = num_bytes;
2762 u64 *last_ptr = NULL;
2763 u64 last_wanted = 0;
2764 struct btrfs_block_group_cache *block_group = NULL;
2765 int chunk_alloc_done = 0;
2766 int empty_cluster = 2 * 1024 * 1024;
2767 int allowed_chunk_alloc = 0;
2768 struct list_head *head = NULL, *cur = NULL;
2769 int loop = 0;
2770 int extra_loop = 0;
2771 struct btrfs_space_info *space_info;
2772
2773 WARN_ON(num_bytes < root->sectorsize);
2774 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2775 ins->objectid = 0;
2776 ins->offset = 0;
2777
2778 if (orig_root->ref_cows || empty_size)
2779 allowed_chunk_alloc = 1;
2780
2781 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2782 last_ptr = &root->fs_info->last_alloc;
2783 empty_cluster = 64 * 1024;
2784 }
2785
2786 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2787 last_ptr = &root->fs_info->last_data_alloc;
2788
2789 if (last_ptr) {
2790 if (*last_ptr) {
2791 hint_byte = *last_ptr;
2792 last_wanted = *last_ptr;
2793 } else
2794 empty_size += empty_cluster;
2795 } else {
2796 empty_cluster = 0;
2797 }
2798 search_start = max(search_start, first_logical_byte(root, 0));
2799 search_start = max(search_start, hint_byte);
2800
2801 if (last_wanted && search_start != last_wanted) {
2802 last_wanted = 0;
2803 empty_size += empty_cluster;
2804 }
2805
2806 total_needed += empty_size;
2807 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2808 if (!block_group)
2809 block_group = btrfs_lookup_first_block_group(root->fs_info,
2810 search_start);
2811 space_info = __find_space_info(root->fs_info, data);
2812
2813 down_read(&space_info->groups_sem);
2814 while (1) {
2815 struct btrfs_free_space *free_space;
2816 /*
2817 * the only way this happens is if our hint points to a block
2818 * group that's not of the proper type; while looping this
2819 * should never happen
2820 */
2821 if (empty_size)
2822 extra_loop = 1;
2823
2824 if (!block_group)
2825 goto new_group_no_lock;
2826
2827 mutex_lock(&block_group->alloc_mutex);
2828 if (unlikely(!block_group_bits(block_group, data)))
2829 goto new_group;
2830
2831 ret = cache_block_group(root, block_group);
2832 if (ret) {
2833 mutex_unlock(&block_group->alloc_mutex);
2834 break;
2835 }
2836
2837 if (block_group->ro)
2838 goto new_group;
2839
2840 free_space = btrfs_find_free_space(block_group, search_start,
2841 total_needed);
2842 if (free_space) {
2843 u64 start = block_group->key.objectid;
2844 u64 end = block_group->key.objectid +
2845 block_group->key.offset;
2846
2847 search_start = stripe_align(root, free_space->offset);
2848
2849 /* past the end of the allowed search range, move on */
2850 if (search_start + num_bytes >= search_end)
2851 goto new_group;
2852
2853 /* move on to the next group */
2854 if (search_start + num_bytes > end)
2855 goto new_group;
2856
2857 if (last_wanted && search_start != last_wanted) {
2858 total_needed += empty_cluster;
2859 empty_size += empty_cluster;
2860 last_wanted = 0;
2861 /*
2862 * if search_start is still in this block group
2863 * then we just re-search this block group
2864 */
2865 if (search_start >= start &&
2866 search_start < end) {
2867 mutex_unlock(&block_group->alloc_mutex);
2868 continue;
2869 }
2870
2871 /* else we go to the next block group */
2872 goto new_group;
2873 }
2874
2875 if (exclude_nr > 0 &&
2876 (search_start + num_bytes > exclude_start &&
2877 search_start < exclude_start + exclude_nr)) {
2878 search_start = exclude_start + exclude_nr;
2879 /*
2880 * if search_start is still in this block group
2881 * then we just re-search this block group
2882 */
2883 if (search_start >= start &&
2884 search_start < end) {
2885 mutex_unlock(&block_group->alloc_mutex);
2886 last_wanted = 0;
2887 continue;
2888 }
2889
2890 /* else we go to the next block group */
2891 goto new_group;
2892 }
2893
2894 ins->objectid = search_start;
2895 ins->offset = num_bytes;
2896
2897 btrfs_remove_free_space_lock(block_group, search_start,
2898 num_bytes);
2899 /* we are all good, lets return */
2900 mutex_unlock(&block_group->alloc_mutex);
2901 break;
2902 }
2903new_group:
2904 mutex_unlock(&block_group->alloc_mutex);
2905new_group_no_lock:
2906 /* don't try to compare new allocations against the
2907 * last allocation any more
2908 */
2909 last_wanted = 0;
2910
2911 /*
2912 * Here's how this works.
2913 * loop == 0: we were searching a block group via a hint
2914 * and didn't find anything, so we start at
2915 * the head of the block groups and keep searching
2916 * loop == 1: we're searching through all of the block groups
2917 * if we hit the head again we have searched
2918 * all of the block groups for this space and we
2919 * need to try and allocate; if we can't, error out.
2920 * loop == 2: we allocated more space and are looping through
2921 * all of the block groups again.
2922 */
2923 if (loop == 0) {
2924 head = &space_info->block_groups;
2925 cur = head->next;
2926 loop++;
2927 } else if (loop == 1 && cur == head) {
2928 int keep_going;
2929
2930 /* at this point we give up on the empty_size
2931 * allocations and just try to allocate the min
2932 * space.
2933 *
2934 * The extra_loop field was set if an empty_size
2935 * allocation was attempted above, and if this
2936 * is true we need to try the loop again without
2937 * the additional empty_size.
2938 */
2939 total_needed -= empty_size;
2940 empty_size = 0;
2941 keep_going = extra_loop;
2942 loop++;
2943
2944 if (allowed_chunk_alloc && !chunk_alloc_done) {
2945 up_read(&space_info->groups_sem);
2946 ret = do_chunk_alloc(trans, root, num_bytes +
2947 2 * 1024 * 1024, data, 1);
2948 down_read(&space_info->groups_sem);
2949 if (ret < 0)
2950 goto loop_check;
2951 head = &space_info->block_groups;
2952 /*
2953 * we've allocated a new chunk, keep
2954 * trying
2955 */
2956 keep_going = 1;
2957 chunk_alloc_done = 1;
2958 } else if (!allowed_chunk_alloc) {
2959 space_info->force_alloc = 1;
2960 }
2961loop_check:
2962 if (keep_going) {
2963 cur = head->next;
2964 extra_loop = 0;
2965 } else {
2966 break;
2967 }
2968 } else if (cur == head) {
2969 break;
2970 }
2971
2972 block_group = list_entry(cur, struct btrfs_block_group_cache,
2973 list);
2974 search_start = block_group->key.objectid;
2975 cur = cur->next;
2976 }
2977
2978 /* we found what we needed */
2979 if (ins->objectid) {
2980 if (!(data & BTRFS_BLOCK_GROUP_DATA))
2981 trans->block_group = block_group;
2982
2983 if (last_ptr)
2984 *last_ptr = ins->objectid + ins->offset;
2985 ret = 0;
2986 } else if (!ret) {
2987 ret = -ENOSPC;
2988 }
2989
2990 up_read(&space_info->groups_sem);
2991 return ret;
2992}
2993
2994static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
2995{
2996 struct btrfs_block_group_cache *cache;
2997 struct list_head *l;
2998
2999 printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
3000 info->total_bytes - info->bytes_used - info->bytes_pinned -
3001 info->bytes_reserved, (info->full) ? "" : "not ");
3002
3003 down_read(&info->groups_sem);
3004 list_for_each(l, &info->block_groups) {
3005 cache = list_entry(l, struct btrfs_block_group_cache, list);
3006 spin_lock(&cache->lock);
3007 printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
3008 "%Lu pinned %Lu reserved\n",
3009 cache->key.objectid, cache->key.offset,
3010 btrfs_block_group_used(&cache->item),
3011 cache->pinned, cache->reserved);
3012 btrfs_dump_free_space(cache, bytes);
3013 spin_unlock(&cache->lock);
3014 }
3015 up_read(&info->groups_sem);
3016}
3017
3018static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3019 struct btrfs_root *root,
3020 u64 num_bytes, u64 min_alloc_size,
3021 u64 empty_size, u64 hint_byte,
3022 u64 search_end, struct btrfs_key *ins,
3023 u64 data)
3024{
3025 int ret;
3026 u64 search_start = 0;
3027 u64 alloc_profile;
3028 struct btrfs_fs_info *info = root->fs_info;
3029
3030 if (data) {
3031 alloc_profile = info->avail_data_alloc_bits &
3032 info->data_alloc_profile;
3033 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3034 } else if (root == root->fs_info->chunk_root) {
3035 alloc_profile = info->avail_system_alloc_bits &
3036 info->system_alloc_profile;
3037 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3038 } else {
3039 alloc_profile = info->avail_metadata_alloc_bits &
3040 info->metadata_alloc_profile;
3041 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3042 }
3043again:
3044 data = btrfs_reduce_alloc_profile(root, data);
3045 /*
3046 * the only place that sets empty_size is btrfs_realloc_node, which
3047 * is not called recursively on allocations
3048 */
3049 if (empty_size || root->ref_cows) {
3050 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3051 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3052 2 * 1024 * 1024,
3053 BTRFS_BLOCK_GROUP_METADATA |
3054 (info->metadata_alloc_profile &
3055 info->avail_metadata_alloc_bits), 0);
3056 }
3057 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3058 num_bytes + 2 * 1024 * 1024, data, 0);
3059 }
3060
3061 WARN_ON(num_bytes < root->sectorsize);
3062 ret = find_free_extent(trans, root, num_bytes, empty_size,
3063 search_start, search_end, hint_byte, ins,
3064 trans->alloc_exclude_start,
3065 trans->alloc_exclude_nr, data);
3066
3067 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3068 num_bytes = num_bytes >> 1;
3069 num_bytes = num_bytes & ~(root->sectorsize - 1);
3070 num_bytes = max(num_bytes, min_alloc_size);
3071 do_chunk_alloc(trans, root->fs_info->extent_root,
3072 num_bytes, data, 1);
3073 goto again;
3074 }
3075 if (ret) {
3076 struct btrfs_space_info *sinfo;
3077
3078 sinfo = __find_space_info(root->fs_info, data);
3079 printk("allocation failed flags %Lu, wanted %Lu\n",
3080 data, num_bytes);
3081 dump_space_info(sinfo, num_bytes);
3082 BUG();
3083 }
3084
3085 return ret;
3086}
3087
3088int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3089{
3090 struct btrfs_block_group_cache *cache;
3091
3092 cache = btrfs_lookup_block_group(root->fs_info, start);
3093 if (!cache) {
3094 printk(KERN_ERR "Unable to find block group for %Lu\n", start);
3095 return -ENOSPC;
3096 }
3097 btrfs_add_free_space(cache, start, len);
3098 update_reserved_extents(root, start, len, 0);
3099 return 0;
3100}
3101
3102int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3103 struct btrfs_root *root,
3104 u64 num_bytes, u64 min_alloc_size,
3105 u64 empty_size, u64 hint_byte,
3106 u64 search_end, struct btrfs_key *ins,
3107 u64 data)
3108{
3109 int ret;
3110 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3111 empty_size, hint_byte, search_end, ins,
3112 data);
3113 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3114 return ret;
3115}
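
/*
 * Sketch (not built) of the reservation lifecycle around the helpers
 * above: reserve space, then either convert the reservation into a
 * real extent item via btrfs_alloc_reserved_extent() or hand the space
 * back on the error path. The sizes and the data flag are assumed
 * example values.
 */
#if 0
static void example_reserve_cycle(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_key ins;
	int ret;

	ret = btrfs_reserve_extent(trans, root, 4096, 4096, 0, 0,
				   (u64)-1, &ins, 1);
	if (ret)
		return;

	/* ... if the caller fails later, give the space back ... */
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
}
#endif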
3116
3117static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3118 struct btrfs_root *root, u64 parent,
3119 u64 root_objectid, u64 ref_generation,
3120 u64 owner, struct btrfs_key *ins)
3121{
3122 int ret;
3123 int pending_ret;
3124 u64 super_used;
3125 u64 root_used;
3126 u64 num_bytes = ins->offset;
3127 u32 sizes[2];
3128 struct btrfs_fs_info *info = root->fs_info;
3129 struct btrfs_root *extent_root = info->extent_root;
3130 struct btrfs_extent_item *extent_item;
3131 struct btrfs_extent_ref *ref;
3132 struct btrfs_path *path;
3133 struct btrfs_key keys[2];
3134
3135 if (parent == 0)
3136 parent = ins->objectid;
3137
3138 /* block accounting for super block */
3139 spin_lock_irq(&info->delalloc_lock);
3140 super_used = btrfs_super_bytes_used(&info->super_copy);
3141 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
3142 spin_unlock_irq(&info->delalloc_lock);
3143
3144 /* block accounting for root item */
3145 root_used = btrfs_root_used(&root->root_item);
3146 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3147
3148 if (root == extent_root) {
3149 struct pending_extent_op *extent_op;
3150
3151 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3152 BUG_ON(!extent_op);
3153
3154 extent_op->type = PENDING_EXTENT_INSERT;
3155 extent_op->bytenr = ins->objectid;
3156 extent_op->num_bytes = ins->offset;
3157 extent_op->parent = parent;
3158 extent_op->orig_parent = 0;
3159 extent_op->generation = ref_generation;
3160 extent_op->orig_generation = 0;
3161 extent_op->level = (int)owner;
3162 INIT_LIST_HEAD(&extent_op->list);
3163 extent_op->del = 0;
3164
3165 mutex_lock(&root->fs_info->extent_ins_mutex);
3166 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
3167 ins->objectid + ins->offset - 1,
3168 EXTENT_WRITEBACK, GFP_NOFS);
3169 set_state_private(&root->fs_info->extent_ins,
3170 ins->objectid, (unsigned long)extent_op);
3171 mutex_unlock(&root->fs_info->extent_ins_mutex);
3172 goto update_block;
3173 }
3174
3175 memcpy(&keys[0], ins, sizeof(*ins));
3176 keys[1].objectid = ins->objectid;
3177 keys[1].type = BTRFS_EXTENT_REF_KEY;
3178 keys[1].offset = parent;
3179 sizes[0] = sizeof(*extent_item);
3180 sizes[1] = sizeof(*ref);
3181
3182 path = btrfs_alloc_path();
3183 BUG_ON(!path);
3184
3185 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3186 sizes, 2);
3187 BUG_ON(ret);
3188
3189 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3190 struct btrfs_extent_item);
3191 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
3192 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3193 struct btrfs_extent_ref);
3194
3195 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3196 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3197 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3198 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
3199
3200 btrfs_mark_buffer_dirty(path->nodes[0]);
3201
3202 trans->alloc_exclude_start = 0;
3203 trans->alloc_exclude_nr = 0;
3204 btrfs_free_path(path);
3205 finish_current_insert(trans, extent_root, 0);
3206 pending_ret = del_pending_extents(trans, extent_root, 0);
3207
3208 if (ret)
3209 goto out;
3210 if (pending_ret) {
3211 ret = pending_ret;
3212 goto out;
3213 }
3214
3215update_block:
3216 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
3217 if (ret) {
3218 printk("update block group failed for %Lu %Lu\n",
3219 ins->objectid, ins->offset);
3220 BUG();
3221 }
3222out:
3223 return ret;
3224}
3225
3226int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3227 struct btrfs_root *root, u64 parent,
3228 u64 root_objectid, u64 ref_generation,
3229 u64 owner, struct btrfs_key *ins)
3230{
3231 int ret;
3232
3233 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3234 return 0;
3235 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3236 ref_generation, owner, ins);
3237 update_reserved_extents(root, ins->objectid, ins->offset, 0);
3238 return ret;
3239}
3240
3241/*
3242 * this is used by the tree logging recovery code. It records that
3243 * an extent has been allocated and makes sure to clear the free
3244 * space cache bits as well
3245 */
3246int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3247 struct btrfs_root *root, u64 parent,
3248 u64 root_objectid, u64 ref_generation,
3249 u64 owner, struct btrfs_key *ins)
3250{
3251 int ret;
3252 struct btrfs_block_group_cache *block_group;
3253
3254 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3255 mutex_lock(&block_group->alloc_mutex);
3256 cache_block_group(root, block_group);
3257
3258 ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
3259 ins->offset);
3260 mutex_unlock(&block_group->alloc_mutex);
3261 BUG_ON(ret);
3262 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3263 ref_generation, owner, ins);
3264 return ret;
3265}
3266
3267/*
3268 * finds a free extent and does all the dirty work required for
3269 * allocation. returns the key for the new extent through ins; callers
3270 * that need a tree buffer for it use btrfs_init_new_buffer() afterwards.
3271 *
3272 * returns 0 if everything worked, non-zero otherwise.
3273 */
3274int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3275 struct btrfs_root *root,
3276 u64 num_bytes, u64 parent, u64 min_alloc_size,
3277 u64 root_objectid, u64 ref_generation,
3278 u64 owner_objectid, u64 empty_size, u64 hint_byte,
3279 u64 search_end, struct btrfs_key *ins, u64 data)
3280{
3281 int ret;
3282
3283 ret = __btrfs_reserve_extent(trans, root, num_bytes,
3284 min_alloc_size, empty_size, hint_byte,
3285 search_end, ins, data);
3286 BUG_ON(ret);
3287 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3288 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
3289 root_objectid, ref_generation,
3290 owner_objectid, ins);
3291 BUG_ON(ret);
3292
3293 } else {
3294 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3295 }
3296 return ret;
3297}
3298
3299struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3300 struct btrfs_root *root,
3301 u64 bytenr, u32 blocksize)
3302{
3303 struct extent_buffer *buf;
3304
3305 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3306 if (!buf)
3307 return ERR_PTR(-ENOMEM);
3308 btrfs_set_header_generation(buf, trans->transid);
3309 btrfs_tree_lock(buf);
3310 clean_tree_block(trans, root, buf);
3311 btrfs_set_buffer_uptodate(buf);
3312 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3313 set_extent_dirty(&root->dirty_log_pages, buf->start,
3314 buf->start + buf->len - 1, GFP_NOFS);
3315 } else {
3316 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3317 buf->start + buf->len - 1, GFP_NOFS);
3318 }
3319 trans->blocks_used++;
3320 return buf;
3321}
3322
3323/*
3324 * helper function to allocate a block for a given tree
3325 * returns the tree buffer or an ERR_PTR on failure.
3326 */
3327struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3328 struct btrfs_root *root,
3329 u32 blocksize, u64 parent,
3330 u64 root_objectid,
3331 u64 ref_generation,
3332 int level,
3333 u64 hint,
3334 u64 empty_size)
3335{
3336 struct btrfs_key ins;
3337 int ret;
3338 struct extent_buffer *buf;
3339
3340 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3341 root_objectid, ref_generation, level,
3342 empty_size, hint, (u64)-1, &ins, 0);
3343 if (ret) {
3344 BUG_ON(ret > 0);
3345 return ERR_PTR(ret);
3346 }
3347
3348 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
3349 return buf;
3350}
3351
3352int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3353 struct btrfs_root *root, struct extent_buffer *leaf)
3354{
3355 u64 leaf_owner;
3356 u64 leaf_generation;
3357 struct btrfs_key key;
3358 struct btrfs_file_extent_item *fi;
3359 int i;
3360 int nritems;
3361 int ret;
3362
3363 BUG_ON(!btrfs_is_leaf(leaf));
3364 nritems = btrfs_header_nritems(leaf);
3365 leaf_owner = btrfs_header_owner(leaf);
3366 leaf_generation = btrfs_header_generation(leaf);
3367
3368 for (i = 0; i < nritems; i++) {
3369 u64 disk_bytenr;
3370 cond_resched();
3371
3372 btrfs_item_key_to_cpu(leaf, &key, i);
3373 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3374 continue;
3375 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3376 if (btrfs_file_extent_type(leaf, fi) ==
3377 BTRFS_FILE_EXTENT_INLINE)
3378 continue;
3379 /*
3380 * FIXME make sure to insert a trans record that
3381 * repeats the snapshot del on crash
3382 */
3383 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3384 if (disk_bytenr == 0)
3385 continue;
3386
3387 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3388 btrfs_file_extent_disk_num_bytes(leaf, fi),
3389 leaf->start, leaf_owner, leaf_generation,
3390 key.objectid, 0);
3391 BUG_ON(ret);
3392
3393 atomic_inc(&root->fs_info->throttle_gen);
3394 wake_up(&root->fs_info->transaction_throttle);
3395 cond_resched();
3396 }
3397 return 0;
3398}
3399
3400static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3401 struct btrfs_root *root,
3402 struct btrfs_leaf_ref *ref)
3403{
3404 int i;
3405 int ret;
3406 struct btrfs_extent_info *info = ref->extents;
3407
3408 for (i = 0; i < ref->nritems; i++) {
3409 ret = __btrfs_free_extent(trans, root, info->bytenr,
3410 info->num_bytes, ref->bytenr,
3411 ref->owner, ref->generation,
3412 info->objectid, 0);
3413
3414 atomic_inc(&root->fs_info->throttle_gen);
3415 wake_up(&root->fs_info->transaction_throttle);
3416 cond_resched();
3417
3418 BUG_ON(ret);
3419 info++;
3420 }
3421
3422 return 0;
3423}
3424
3425int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
3426 u32 *refs)
3427{
3428 int ret;
3429
3430 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3431 BUG_ON(ret);
3432
3433#if 0 /* some debugging code in case we see problems here */
3434 /* if the refs count is one, it won't get increased again. But
3435 * if the ref count is > 1, someone may be decreasing it at
3436 * the same time we are.
3437 */
3438 if (*refs != 1) {
3439 struct extent_buffer *eb = NULL;
3440 eb = btrfs_find_create_tree_block(root, start, len);
3441 if (eb)
3442 btrfs_tree_lock(eb);
3443
3444 mutex_lock(&root->fs_info->alloc_mutex);
3445 ret = lookup_extent_ref(NULL, root, start, len, refs);
3446 BUG_ON(ret);
3447 mutex_unlock(&root->fs_info->alloc_mutex);
3448
3449 if (eb) {
3450 btrfs_tree_unlock(eb);
3451 free_extent_buffer(eb);
3452 }
3453 if (*refs == 1) {
3454 printk("block %llu went down to one during drop_snap\n",
3455 (unsigned long long)start);
3456 }
3457
3458 }
3459#endif
3460
3461 cond_resched();
3462 return ret;
3463}
3464
3465/*
3466 * helper function for drop_snapshot, this walks down the tree dropping ref
3467 * counts as it goes.
3468 */
3469static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
3470 struct btrfs_root *root,
3471 struct btrfs_path *path, int *level)
3472{
3473 u64 root_owner;
3474 u64 root_gen;
3475 u64 bytenr;
3476 u64 ptr_gen;
3477 struct extent_buffer *next;
3478 struct extent_buffer *cur;
3479 struct extent_buffer *parent;
3480 struct btrfs_leaf_ref *ref;
3481 u32 blocksize;
3482 int ret;
3483 u32 refs;
3484
3485 WARN_ON(*level < 0);
3486 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3487 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
3488 path->nodes[*level]->len, &refs);
3489 BUG_ON(ret);
3490 if (refs > 1)
3491 goto out;
3492
3493 /*
3494 * walk down to the last node level and free all the leaves
3495 */
3496 while(*level >= 0) {
3497 WARN_ON(*level < 0);
3498 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3499 cur = path->nodes[*level];
3500
3501 if (btrfs_header_level(cur) != *level)
3502 WARN_ON(1);
3503
3504 if (path->slots[*level] >=
3505 btrfs_header_nritems(cur))
3506 break;
3507 if (*level == 0) {
3508 ret = btrfs_drop_leaf_ref(trans, root, cur);
3509 BUG_ON(ret);
3510 break;
3511 }
3512 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3513 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3514 blocksize = btrfs_level_size(root, *level - 1);
3515
3516 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3517 BUG_ON(ret);
3518 if (refs != 1) {
3519 parent = path->nodes[*level];
3520 root_owner = btrfs_header_owner(parent);
3521 root_gen = btrfs_header_generation(parent);
3522 path->slots[*level]++;
3523
3524 ret = __btrfs_free_extent(trans, root, bytenr,
3525 blocksize, parent->start,
3526 root_owner, root_gen,
3527 *level - 1, 1);
3528 BUG_ON(ret);
3529
3530 atomic_inc(&root->fs_info->throttle_gen);
3531 wake_up(&root->fs_info->transaction_throttle);
3532 cond_resched();
3533
3534 continue;
3535 }
3536 /*
3537 * at this point, we have a single ref, and since the
3538 * only place referencing this extent is a dead root
3539 * the reference count should never go higher.
3540 * So, we don't need to check it again
3541 */
3542 if (*level == 1) {
3543 ref = btrfs_lookup_leaf_ref(root, bytenr);
3544 if (ref && ref->generation != ptr_gen) {
3545 btrfs_free_leaf_ref(root, ref);
3546 ref = NULL;
3547 }
3548 if (ref) {
3549 ret = cache_drop_leaf_ref(trans, root, ref);
3550 BUG_ON(ret);
3551 btrfs_remove_leaf_ref(root, ref);
3552 btrfs_free_leaf_ref(root, ref);
3553 *level = 0;
3554 break;
3555 }
3556 if (printk_ratelimit()) {
3557 printk("leaf ref miss for bytenr %llu\n",
3558 (unsigned long long)bytenr);
3559 }
3560 }
3561 next = btrfs_find_tree_block(root, bytenr, blocksize);
3562 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
3563 free_extent_buffer(next);
3564
3565 next = read_tree_block(root, bytenr, blocksize,
3566 ptr_gen);
3567 cond_resched();
3568#if 0
3569 /*
3570 * this is a debugging check and can go away
3571 * the ref should never go all the way down to 1
3572 * at this point
3573 */
3574 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
3575 &refs);
3576 BUG_ON(ret);
3577 WARN_ON(refs != 1);
3578#endif
3579 }
3580 WARN_ON(*level <= 0);
3581 if (path->nodes[*level-1])
3582 free_extent_buffer(path->nodes[*level-1]);
3583 path->nodes[*level-1] = next;
3584 *level = btrfs_header_level(next);
3585 path->slots[*level] = 0;
3586 cond_resched();
3587 }
3588out:
3589 WARN_ON(*level < 0);
3590 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3591
3592 if (path->nodes[*level] == root->node) {
3593 parent = path->nodes[*level];
3594 bytenr = path->nodes[*level]->start;
3595 } else {
3596 parent = path->nodes[*level + 1];
3597 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3598 }
3599
3600 blocksize = btrfs_level_size(root, *level);
3601 root_owner = btrfs_header_owner(parent);
3602 root_gen = btrfs_header_generation(parent);
3603
3604 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3605 parent->start, root_owner, root_gen,
3606 *level, 1);
3607 free_extent_buffer(path->nodes[*level]);
3608 path->nodes[*level] = NULL;
3609 *level += 1;
3610 BUG_ON(ret);
3611
3612 cond_resched();
3613 return 0;
3614}
3615
3616/*
3617 * helper function for drop_subtree, this function is similar to
3618 * walk_down_tree. The main difference is that it checks reference
3619 * counts while tree blocks are locked.
3620 */
3621static int noinline walk_down_subtree(struct btrfs_trans_handle *trans,
3622 struct btrfs_root *root,
3623 struct btrfs_path *path, int *level)
3624{
3625 struct extent_buffer *next;
3626 struct extent_buffer *cur;
3627 struct extent_buffer *parent;
3628 u64 bytenr;
3629 u64 ptr_gen;
3630 u32 blocksize;
3631 u32 refs;
3632 int ret;
3633
3634 cur = path->nodes[*level];
3635 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3636 &refs);
3637 BUG_ON(ret);
3638 if (refs > 1)
3639 goto out;
3640
3641 while (*level >= 0) {
3642 cur = path->nodes[*level];
3643 if (*level == 0) {
3644 ret = btrfs_drop_leaf_ref(trans, root, cur);
3645 BUG_ON(ret);
3646 clean_tree_block(trans, root, cur);
3647 break;
3648 }
3649 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3650 clean_tree_block(trans, root, cur);
3651 break;
3652 }
3653
3654 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3655 blocksize = btrfs_level_size(root, *level - 1);
3656 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3657
3658 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3659 btrfs_tree_lock(next);
3660
3661 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3662 &refs);
3663 BUG_ON(ret);
3664 if (refs > 1) {
3665 parent = path->nodes[*level];
3666 ret = btrfs_free_extent(trans, root, bytenr,
3667 blocksize, parent->start,
3668 btrfs_header_owner(parent),
3669 btrfs_header_generation(parent),
3670 *level - 1, 1);
3671 BUG_ON(ret);
3672 path->slots[*level]++;
3673 btrfs_tree_unlock(next);
3674 free_extent_buffer(next);
3675 continue;
3676 }
3677
3678 *level = btrfs_header_level(next);
3679 path->nodes[*level] = next;
3680 path->slots[*level] = 0;
3681 path->locks[*level] = 1;
3682 cond_resched();
3683 }
3684out:
3685 parent = path->nodes[*level + 1];
3686 bytenr = path->nodes[*level]->start;
3687 blocksize = path->nodes[*level]->len;
3688
3689 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3690 parent->start, btrfs_header_owner(parent),
3691 btrfs_header_generation(parent), *level, 1);
3692 BUG_ON(ret);
3693
3694 if (path->locks[*level]) {
3695 btrfs_tree_unlock(path->nodes[*level]);
3696 path->locks[*level] = 0;
3697 }
3698 free_extent_buffer(path->nodes[*level]);
3699 path->nodes[*level] = NULL;
3700 *level += 1;
3701 cond_resched();
3702 return 0;
3703}
3704
3705/*
3706 * helper for dropping snapshots. This walks back up the tree in the path
3707 * to find the first node higher up where we haven't yet gone through
3708 * all the slots
3709 */
3710static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
3711 struct btrfs_root *root,
3712 struct btrfs_path *path,
3713 int *level, int max_level)
3714{
3715 u64 root_owner;
3716 u64 root_gen;
3717 struct btrfs_root_item *root_item = &root->root_item;
3718 int i;
3719 int slot;
3720 int ret;
3721
3722 for (i = *level; i < max_level && path->nodes[i]; i++) {
3723 slot = path->slots[i];
3724 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3725 struct extent_buffer *node;
3726 struct btrfs_disk_key disk_key;
3727 node = path->nodes[i];
3728 path->slots[i]++;
3729 *level = i;
3730 WARN_ON(*level == 0);
3731 btrfs_node_key(node, &disk_key, path->slots[i]);
3732 memcpy(&root_item->drop_progress,
3733 &disk_key, sizeof(disk_key));
3734 root_item->drop_level = i;
3735 return 0;
3736 } else {
3737 struct extent_buffer *parent;
3738 if (path->nodes[*level] == root->node)
3739 parent = path->nodes[*level];
3740 else
3741 parent = path->nodes[*level + 1];
3742
3743 root_owner = btrfs_header_owner(parent);
3744 root_gen = btrfs_header_generation(parent);
3745
3746 clean_tree_block(trans, root, path->nodes[*level]);
3747 ret = btrfs_free_extent(trans, root,
3748 path->nodes[*level]->start,
3749 path->nodes[*level]->len,
3750 parent->start, root_owner,
3751 root_gen, *level, 1);
3752 BUG_ON(ret);
3753 if (path->locks[*level]) {
3754 btrfs_tree_unlock(path->nodes[*level]);
3755 path->locks[*level] = 0;
3756 }
3757 free_extent_buffer(path->nodes[*level]);
3758 path->nodes[*level] = NULL;
3759 *level = i + 1;
3760 }
3761 }
3762 return 1;
3763}
3764
3765/*
3766 * drop the reference count on the tree rooted at 'snap'. This traverses
3767 * the tree freeing any blocks that have a ref count of zero after being
3768 * decremented.
3769 */
3770int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
3771 *root)
3772{
3773 int ret = 0;
3774 int wret;
3775 int level;
3776 struct btrfs_path *path;
3777 int i;
3778 int orig_level;
3779 struct btrfs_root_item *root_item = &root->root_item;
3780
3781 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3782 path = btrfs_alloc_path();
3783 BUG_ON(!path);
3784
3785 level = btrfs_header_level(root->node);
3786 orig_level = level;
3787 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3788 path->nodes[level] = root->node;
3789 extent_buffer_get(root->node);
3790 path->slots[level] = 0;
3791 } else {
3792 struct btrfs_key key;
3793 struct btrfs_disk_key found_key;
3794 struct extent_buffer *node;
3795
3796 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3797 level = root_item->drop_level;
3798 path->lowest_level = level;
3799 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3800 if (wret < 0) {
3801 ret = wret;
3802 goto out;
3803 }
3804 node = path->nodes[level];
3805 btrfs_node_key(node, &found_key, path->slots[level]);
3806 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3807 sizeof(found_key)));
3808 /*
3809 * unlock our path, this is safe because only this
3810 * function is allowed to delete this snapshot
3811 */
3812 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3813 if (path->nodes[i] && path->locks[i]) {
3814 path->locks[i] = 0;
3815 btrfs_tree_unlock(path->nodes[i]);
3816 }
3817 }
3818 }
3819 while(1) {
3820 wret = walk_down_tree(trans, root, path, &level);
3821 if (wret > 0)
3822 break;
3823 if (wret < 0)
3824 ret = wret;
3825
3826 wret = walk_up_tree(trans, root, path, &level,
3827 BTRFS_MAX_LEVEL);
3828 if (wret > 0)
3829 break;
3830 if (wret < 0)
3831 ret = wret;
3832 if (trans->transaction->in_commit) {
3833 ret = -EAGAIN;
3834 break;
3835 }
3836 atomic_inc(&root->fs_info->throttle_gen);
3837 wake_up(&root->fs_info->transaction_throttle);
3838 }
3839 for (i = 0; i <= orig_level; i++) {
3840 if (path->nodes[i]) {
3841 free_extent_buffer(path->nodes[i]);
3842 path->nodes[i] = NULL;
3843 }
3844 }
3845out:
3846 btrfs_free_path(path);
3847 return ret;
3848}
3849
3850int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3851 struct btrfs_root *root,
3852 struct extent_buffer *node,
3853 struct extent_buffer *parent)
3854{
3855 struct btrfs_path *path;
3856 int level;
3857 int parent_level;
3858 int ret = 0;
3859 int wret;
3860
3861 path = btrfs_alloc_path();
3862 BUG_ON(!path);
3863
3864 BUG_ON(!btrfs_tree_locked(parent));
3865 parent_level = btrfs_header_level(parent);
3866 extent_buffer_get(parent);
3867 path->nodes[parent_level] = parent;
3868 path->slots[parent_level] = btrfs_header_nritems(parent);
3869
3870 BUG_ON(!btrfs_tree_locked(node));
3871 level = btrfs_header_level(node);
3872 extent_buffer_get(node);
3873 path->nodes[level] = node;
3874 path->slots[level] = 0;
3875
3876 while (1) {
3877 wret = walk_down_subtree(trans, root, path, &level);
3878 if (wret < 0)
3879 ret = wret;
3880 if (wret != 0)
3881 break;
3882
3883 wret = walk_up_tree(trans, root, path, &level, parent_level);
3884 if (wret < 0)
3885 ret = wret;
3886 if (wret != 0)
3887 break;
3888 }
3889
3890 btrfs_free_path(path);
3891 return ret;
3892}
3893
3894static unsigned long calc_ra(unsigned long start, unsigned long last,
3895 unsigned long nr)
3896{
3897 return min(last, start + nr - 1);
3898}
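
/*
 * Worked example for calc_ra() above: with start == 10, nr == 32 and
 * last == 20, the readahead window would run out to page 41 but is
 * capped at the last page, so min(20, 41) == 20 is returned.
 */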
3899
3900static int noinline relocate_inode_pages(struct inode *inode, u64 start,
3901 u64 len)
3902{
3903 u64 page_start;
3904 u64 page_end;
3905 unsigned long first_index;
3906 unsigned long last_index;
3907 unsigned long i;
3908 struct page *page;
3909 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3910 struct file_ra_state *ra;
3911 struct btrfs_ordered_extent *ordered;
3912 unsigned int total_read = 0;
3913 unsigned int total_dirty = 0;
3914 int ret = 0;
3915
3916	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;
3917
3918 mutex_lock(&inode->i_mutex);
3919 first_index = start >> PAGE_CACHE_SHIFT;
3920 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3921
3922	/* make sure the dirty trick played by the caller works */
3923 ret = invalidate_inode_pages2_range(inode->i_mapping,
3924 first_index, last_index);
3925 if (ret)
3926 goto out_unlock;
3927
3928 file_ra_state_init(ra, inode->i_mapping);
3929
3930	for (i = first_index; i <= last_index; i++) {
3931 if (total_read % ra->ra_pages == 0) {
3932 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3933 calc_ra(i, last_index, ra->ra_pages));
3934 }
3935 total_read++;
3936again:
3937 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
3938			BUG();
3939 page = grab_cache_page(inode->i_mapping, i);
3940 if (!page) {
3941 ret = -ENOMEM;
3942 goto out_unlock;
3943 }
3944 if (!PageUptodate(page)) {
3945 btrfs_readpage(NULL, page);
3946 lock_page(page);
3947 if (!PageUptodate(page)) {
3948 unlock_page(page);
3949 page_cache_release(page);
3950 ret = -EIO;
3951 goto out_unlock;
3952 }
3953 }
3954 wait_on_page_writeback(page);
3955
3956 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3957 page_end = page_start + PAGE_CACHE_SIZE - 1;
3958 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3959
3960 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3961 if (ordered) {
3962 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3963 unlock_page(page);
3964 page_cache_release(page);
3965 btrfs_start_ordered_extent(inode, ordered, 1);
3966 btrfs_put_ordered_extent(ordered);
3967 goto again;
3968 }
3969 set_page_extent_mapped(page);
3970
3971 btrfs_set_extent_delalloc(inode, page_start, page_end);
3972 if (i == first_index)
3973 set_extent_bits(io_tree, page_start, page_end,
3974 EXTENT_BOUNDARY, GFP_NOFS);
3975
3976 set_page_dirty(page);
3977 total_dirty++;
3978
3979 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3980 unlock_page(page);
3981 page_cache_release(page);
3982 }
3983
3984out_unlock:
3985 kfree(ra);
3986 mutex_unlock(&inode->i_mutex);
3987 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
3988 return ret;
3989}
3990
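/*
 * relocate one data extent: insert a pinned extent mapping that makes
 * the corresponding file range of the reloc inode read from the old
 * extent, then pull the data in and redirty it via
 * relocate_inode_pages().
 */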
3991static int noinline relocate_data_extent(struct inode *reloc_inode,
3992 struct btrfs_key *extent_key,
3993 u64 offset)
3994{
3995 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
3996 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
3997 struct extent_map *em;
3998 u64 start = extent_key->objectid - offset;
3999 u64 end = start + extent_key->offset - 1;
4000
4001 em = alloc_extent_map(GFP_NOFS);
4002 BUG_ON(!em || IS_ERR(em));
4003
4004 em->start = start;
4005 em->len = extent_key->offset;
4006 em->block_len = extent_key->offset;
4007 em->block_start = extent_key->objectid;
4008 em->bdev = root->fs_info->fs_devices->latest_bdev;
4009 set_bit(EXTENT_FLAG_PINNED, &em->flags);
4010
4011	/* set up an extent map to cheat btrfs_readpage */
4012 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4013 while (1) {
4014 int ret;
4015 spin_lock(&em_tree->lock);
4016 ret = add_extent_mapping(em_tree, em);
4017 spin_unlock(&em_tree->lock);
4018 if (ret != -EEXIST) {
4019 free_extent_map(em);
4020 break;
4021 }
4022 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4023 }
4024 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4025
4026 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4027}
4028
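/*
 * a reference path describes one chain of tree blocks leading from an
 * extent up to a tree root.  nodes[] holds the bytenr of the block
 * referencing the extent at each level; node_keys[] and new_nodes[]
 * carry the keys and relocated block addresses used when the path is
 * merged into a reloc tree.
 */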
4029struct btrfs_ref_path {
4030 u64 extent_start;
4031 u64 nodes[BTRFS_MAX_LEVEL];
4032 u64 root_objectid;
4033 u64 root_generation;
4034 u64 owner_objectid;
4035 u32 num_refs;
4036 int lowest_level;
4037 int current_level;
4038 int shared_level;
4039
4040 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4041 u64 new_nodes[BTRFS_MAX_LEVEL];
4042};
4043
4044struct disk_extent {
4045 u64 ram_bytes;
4046 u64 disk_bytenr;
4047 u64 disk_num_bytes;
4048 u64 offset;
4049 u64 num_bytes;
4050 u8 compression;
4051 u8 encryption;
4052 u16 other_encoding;
4053};
4054
4055static int is_cowonly_root(u64 root_objectid)
4056{
4057 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4058 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4059 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4060 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4061 root_objectid == BTRFS_TREE_LOG_OBJECTID)
4062 return 1;
4063 return 0;
4064}
4065
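/*
 * step to the next reference path of ref_path->extent_start.  on the
 * first call the path is walked up from the extent to a tree root;
 * later calls walk back down to the deepest level with another backref
 * and up again.  returns 0 when a path was found, 1 when all paths
 * have been visited, and a negative errno on error.
 */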
4066static int noinline __next_ref_path(struct btrfs_trans_handle *trans,
4067 struct btrfs_root *extent_root,
4068 struct btrfs_ref_path *ref_path,
4069 int first_time)
4070{
4071 struct extent_buffer *leaf;
4072 struct btrfs_path *path;
4073 struct btrfs_extent_ref *ref;
4074 struct btrfs_key key;
4075 struct btrfs_key found_key;
4076 u64 bytenr;
4077 u32 nritems;
4078 int level;
4079 int ret = 1;
4080
4081 path = btrfs_alloc_path();
4082 if (!path)
4083 return -ENOMEM;
4084
4085 if (first_time) {
4086 ref_path->lowest_level = -1;
4087 ref_path->current_level = -1;
4088 ref_path->shared_level = -1;
4089 goto walk_up;
4090 }
4091walk_down:
4092 level = ref_path->current_level - 1;
4093 while (level >= -1) {
4094 u64 parent;
4095 if (level < ref_path->lowest_level)
4096 break;
4097
4098 if (level >= 0) {
4099 bytenr = ref_path->nodes[level];
4100 } else {
4101 bytenr = ref_path->extent_start;
4102 }
4103 BUG_ON(bytenr == 0);
4104
4105 parent = ref_path->nodes[level + 1];
4106 ref_path->nodes[level + 1] = 0;
4107 ref_path->current_level = level;
4108 BUG_ON(parent == 0);
4109
4110 key.objectid = bytenr;
4111 key.offset = parent + 1;
4112 key.type = BTRFS_EXTENT_REF_KEY;
4113
4114 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4115 if (ret < 0)
4116 goto out;
4117 BUG_ON(ret == 0);
4118
4119 leaf = path->nodes[0];
4120 nritems = btrfs_header_nritems(leaf);
4121 if (path->slots[0] >= nritems) {
4122 ret = btrfs_next_leaf(extent_root, path);
4123 if (ret < 0)
4124 goto out;
4125 if (ret > 0)
4126 goto next;
4127 leaf = path->nodes[0];
4128 }
4129
4130 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4131 if (found_key.objectid == bytenr &&
4132 found_key.type == BTRFS_EXTENT_REF_KEY) {
4133 if (level < ref_path->shared_level)
4134 ref_path->shared_level = level;
4135 goto found;
4136 }
4137next:
4138 level--;
4139 btrfs_release_path(extent_root, path);
4140 cond_resched();
4141 }
4142 /* reached lowest level */
4143 ret = 1;
4144 goto out;
4145walk_up:
4146 level = ref_path->current_level;
4147 while (level < BTRFS_MAX_LEVEL - 1) {
4148 u64 ref_objectid;
4149 if (level >= 0) {
4150 bytenr = ref_path->nodes[level];
4151 } else {
4152 bytenr = ref_path->extent_start;
4153 }
4154 BUG_ON(bytenr == 0);
4155
4156 key.objectid = bytenr;
4157 key.offset = 0;
4158 key.type = BTRFS_EXTENT_REF_KEY;
4159
4160 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4161 if (ret < 0)
4162 goto out;
4163
4164 leaf = path->nodes[0];
4165 nritems = btrfs_header_nritems(leaf);
4166 if (path->slots[0] >= nritems) {
4167 ret = btrfs_next_leaf(extent_root, path);
4168 if (ret < 0)
4169 goto out;
4170 if (ret > 0) {
4171 /* the extent was freed by someone */
4172 if (ref_path->lowest_level == level)
4173 goto out;
4174 btrfs_release_path(extent_root, path);
4175 goto walk_down;
4176 }
4177 leaf = path->nodes[0];
4178 }
4179
4180 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4181 if (found_key.objectid != bytenr ||
4182 found_key.type != BTRFS_EXTENT_REF_KEY) {
4183 /* the extent was freed by someone */
4184 if (ref_path->lowest_level == level) {
4185 ret = 1;
4186 goto out;
4187 }
4188 btrfs_release_path(extent_root, path);
4189 goto walk_down;
4190 }
4191found:
4192 ref = btrfs_item_ptr(leaf, path->slots[0],
4193 struct btrfs_extent_ref);
4194 ref_objectid = btrfs_ref_objectid(leaf, ref);
4195 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4196 if (first_time) {
4197 level = (int)ref_objectid;
4198 BUG_ON(level >= BTRFS_MAX_LEVEL);
4199 ref_path->lowest_level = level;
4200 ref_path->current_level = level;
4201 ref_path->nodes[level] = bytenr;
4202 } else {
4203 WARN_ON(ref_objectid != level);
4204 }
4205 } else {
4206 WARN_ON(level != -1);
4207 }
4208 first_time = 0;
4209
4210 if (ref_path->lowest_level == level) {
4211 ref_path->owner_objectid = ref_objectid;
4212 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4213 }
4214
4215 /*
4216	 * the block is a tree root or the block isn't in a
4217	 * reference counted tree.
4218 */
4219 if (found_key.objectid == found_key.offset ||
4220 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4221 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4222 ref_path->root_generation =
4223 btrfs_ref_generation(leaf, ref);
4224 if (level < 0) {
4225 /* special reference from the tree log */
4226 ref_path->nodes[0] = found_key.offset;
4227 ref_path->current_level = 0;
4228 }
4229 ret = 0;
4230 goto out;
4231 }
4232
4233 level++;
4234 BUG_ON(ref_path->nodes[level] != 0);
4235 ref_path->nodes[level] = found_key.offset;
4236 ref_path->current_level = level;
4237
4238 /*
4239 * the reference was created in the running transaction,
4240 * no need to continue walking up.
4241 */
4242 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4243 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4244 ref_path->root_generation =
4245 btrfs_ref_generation(leaf, ref);
4246 ret = 0;
4247 goto out;
4248 }
4249
4250 btrfs_release_path(extent_root, path);
4251 cond_resched();
4252 }
4253 /* reached max tree level, but no tree root found. */
4254 BUG();
4255out:
4256 btrfs_free_path(path);
4257 return ret;
4258}
4259
4260static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4261 struct btrfs_root *extent_root,
4262 struct btrfs_ref_path *ref_path,
4263 u64 extent_start)
4264{
4265 memset(ref_path, 0, sizeof(*ref_path));
4266 ref_path->extent_start = extent_start;
4267
4268 return __next_ref_path(trans, extent_root, ref_path, 1);
4269}
4270
4271static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4272 struct btrfs_root *extent_root,
4273 struct btrfs_ref_path *ref_path)
4274{
4275 return __next_ref_path(trans, extent_root, ref_path, 0);
4276}
4277
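/*
 * find the new location(s) of a relocated extent by reading the file
 * extent items of the reloc inode.  the result is returned in
 * *extents/*nr_extents; if no_fragment is set, 1 is returned when the
 * extent was split into more than one piece.
 */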
4278static int noinline get_new_locations(struct inode *reloc_inode,
4279 struct btrfs_key *extent_key,
4280 u64 offset, int no_fragment,
4281 struct disk_extent **extents,
4282 int *nr_extents)
4283{
4284 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4285 struct btrfs_path *path;
4286 struct btrfs_file_extent_item *fi;
4287 struct extent_buffer *leaf;
4288 struct disk_extent *exts = *extents;
4289 struct btrfs_key found_key;
4290 u64 cur_pos;
4291 u64 last_byte;
4292 u32 nritems;
4293 int nr = 0;
4294 int max = *nr_extents;
4295 int ret;
4296
4297 WARN_ON(!no_fragment && *extents);
4298 if (!exts) {
4299 max = 1;
4300 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4301 if (!exts)
4302 return -ENOMEM;
4303 }
4304
4305 path = btrfs_alloc_path();
4306 BUG_ON(!path);
4307
4308 cur_pos = extent_key->objectid - offset;
4309 last_byte = extent_key->objectid + extent_key->offset;
4310 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4311 cur_pos, 0);
4312 if (ret < 0)
4313 goto out;
4314 if (ret > 0) {
4315 ret = -ENOENT;
4316 goto out;
4317 }
4318
4319 while (1) {
4320 leaf = path->nodes[0];
4321 nritems = btrfs_header_nritems(leaf);
4322 if (path->slots[0] >= nritems) {
4323 ret = btrfs_next_leaf(root, path);
4324 if (ret < 0)
4325 goto out;
4326 if (ret > 0)
4327 break;
4328 leaf = path->nodes[0];
4329 }
4330
4331 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4332 if (found_key.offset != cur_pos ||
4333 found_key.type != BTRFS_EXTENT_DATA_KEY ||
4334 found_key.objectid != reloc_inode->i_ino)
4335 break;
4336
4337 fi = btrfs_item_ptr(leaf, path->slots[0],
4338 struct btrfs_file_extent_item);
4339 if (btrfs_file_extent_type(leaf, fi) !=
4340 BTRFS_FILE_EXTENT_REG ||
4341 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4342 break;
4343
4344 if (nr == max) {
4345 struct disk_extent *old = exts;
4346 max *= 2;
4347			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			if (!exts) {
				exts = old;
				ret = -ENOMEM;
				goto out;
			}
4348			memcpy(exts, old, sizeof(*exts) * nr);
4349 if (old != *extents)
4350 kfree(old);
4351 }
4352
4353 exts[nr].disk_bytenr =
4354 btrfs_file_extent_disk_bytenr(leaf, fi);
4355 exts[nr].disk_num_bytes =
4356 btrfs_file_extent_disk_num_bytes(leaf, fi);
4357 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4358 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4359 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4360 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4361 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4362 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4363 fi);
4364 BUG_ON(exts[nr].offset > 0);
4365 BUG_ON(exts[nr].compression || exts[nr].encryption);
4366 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4367
4368 cur_pos += exts[nr].num_bytes;
4369 nr++;
4370
4371 if (cur_pos + offset >= last_byte)
4372 break;
4373
4374 if (no_fragment) {
4375 ret = 1;
4376 goto out;
4377 }
4378 path->slots[0]++;
4379 }
4380
4381 WARN_ON(cur_pos + offset > last_byte);
4382 if (cur_pos + offset < last_byte) {
4383 ret = -ENOENT;
4384 goto out;
4385 }
4386 ret = 0;
4387out:
4388 btrfs_free_path(path);
4389 if (ret) {
4390 if (exts != *extents)
4391 kfree(exts);
4392 } else {
4393 *extents = exts;
4394 *nr_extents = nr;
4395 }
4396 return ret;
4397}
4398
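/*
 * walk the file extent items reachable through one reference path and
 * switch every pointer at the old extent over to its new location.
 * each file range is locked in the io tree while its extent pointer is
 * updated, and the extent reference counts are adjusted to match.
 */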
4399static int noinline replace_one_extent(struct btrfs_trans_handle *trans,
4400 struct btrfs_root *root,
4401 struct btrfs_path *path,
4402 struct btrfs_key *extent_key,
4403 struct btrfs_key *leaf_key,
4404 struct btrfs_ref_path *ref_path,
4405 struct disk_extent *new_extents,
4406 int nr_extents)
4407{
4408 struct extent_buffer *leaf;
4409 struct btrfs_file_extent_item *fi;
4410 struct inode *inode = NULL;
4411 struct btrfs_key key;
4412 u64 lock_start = 0;
4413 u64 lock_end = 0;
4414 u64 num_bytes;
4415 u64 ext_offset;
4416 u64 first_pos;
4417 u32 nritems;
4418	int nr_scanned = 0;
4419 int extent_locked = 0;
4420 int extent_type;
4421 int ret;
4422
4423 memcpy(&key, leaf_key, sizeof(key));
4424 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
4425 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4426 if (key.objectid < ref_path->owner_objectid ||
4427 (key.objectid == ref_path->owner_objectid &&
4428 key.type < BTRFS_EXTENT_DATA_KEY)) {
4429 key.objectid = ref_path->owner_objectid;
4430 key.type = BTRFS_EXTENT_DATA_KEY;
4431 key.offset = 0;
4432 }
4433 }
4434
4435 while (1) {
4436 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4437 if (ret < 0)
4438 goto out;
4439
4440 leaf = path->nodes[0];
4441 nritems = btrfs_header_nritems(leaf);
4442next:
4443 if (extent_locked && ret > 0) {
4444 /*
4445 * the file extent item was modified by someone
4446 * before the extent got locked.
4447 */
4448 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4449 lock_end, GFP_NOFS);
4450 extent_locked = 0;
4451 }
4452
4453 if (path->slots[0] >= nritems) {
4454			if (++nr_scanned > 2)
4455 break;
4456
4457 BUG_ON(extent_locked);
4458 ret = btrfs_next_leaf(root, path);
4459 if (ret < 0)
4460 goto out;
4461 if (ret > 0)
4462 break;
4463 leaf = path->nodes[0];
4464 nritems = btrfs_header_nritems(leaf);
4465 }
4466
4467 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4468
4469 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4470 if ((key.objectid > ref_path->owner_objectid) ||
4471 (key.objectid == ref_path->owner_objectid &&
4472 key.type > BTRFS_EXTENT_DATA_KEY) ||
4473 (key.offset >= first_pos + extent_key->offset))
4474 break;
4475 }
4476
4477 if (inode && key.objectid != inode->i_ino) {
4478 BUG_ON(extent_locked);
4479 btrfs_release_path(root, path);
4480 mutex_unlock(&inode->i_mutex);
4481 iput(inode);
4482 inode = NULL;
4483 continue;
4484 }
4485
4486 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4487 path->slots[0]++;
4488 ret = 1;
4489 goto next;
4490 }
4491 fi = btrfs_item_ptr(leaf, path->slots[0],
4492 struct btrfs_file_extent_item);
4493 extent_type = btrfs_file_extent_type(leaf, fi);
4494 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4495 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4496 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4497 extent_key->objectid)) {
4498 path->slots[0]++;
4499 ret = 1;
4500 goto next;
4501 }
4502
4503 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4504 ext_offset = btrfs_file_extent_offset(leaf, fi);
4505
4506 if (first_pos > key.offset - ext_offset)
4507 first_pos = key.offset - ext_offset;
4508
4509 if (!extent_locked) {
4510 lock_start = key.offset;
4511 lock_end = lock_start + num_bytes - 1;
4512 } else {
4513 if (lock_start > key.offset ||
4514 lock_end + 1 < key.offset + num_bytes) {
4515 unlock_extent(&BTRFS_I(inode)->io_tree,
4516 lock_start, lock_end, GFP_NOFS);
4517 extent_locked = 0;
4518 }
4519 }
4520
4521 if (!inode) {
4522 btrfs_release_path(root, path);
4523
4524 inode = btrfs_iget_locked(root->fs_info->sb,
4525 key.objectid, root);
4526			if (inode && (inode->i_state & I_NEW)) {
4527 BTRFS_I(inode)->root = root;
4528 BTRFS_I(inode)->location.objectid =
4529 key.objectid;
4530 BTRFS_I(inode)->location.type =
4531 BTRFS_INODE_ITEM_KEY;
4532 BTRFS_I(inode)->location.offset = 0;
4533 btrfs_read_locked_inode(inode);
4534 unlock_new_inode(inode);
4535 }
4536 /*
4537			 * some code calls btrfs_commit_transaction while
4538 * holding the i_mutex, so we can't use mutex_lock
4539 * here.
4540 */
4541			if (!inode || is_bad_inode(inode) ||
4542			    !mutex_trylock(&inode->i_mutex)) {
4543 iput(inode);
4544 inode = NULL;
4545 key.offset = (u64)-1;
4546 goto skip;
4547 }
4548 }
4549
4550 if (!extent_locked) {
4551 struct btrfs_ordered_extent *ordered;
4552
4553 btrfs_release_path(root, path);
4554
4555 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4556 lock_end, GFP_NOFS);
4557 ordered = btrfs_lookup_first_ordered_extent(inode,
4558 lock_end);
4559 if (ordered &&
4560 ordered->file_offset <= lock_end &&
4561 ordered->file_offset + ordered->len > lock_start) {
4562 unlock_extent(&BTRFS_I(inode)->io_tree,
4563 lock_start, lock_end, GFP_NOFS);
4564 btrfs_start_ordered_extent(inode, ordered, 1);
4565 btrfs_put_ordered_extent(ordered);
4566 key.offset += num_bytes;
4567 goto skip;
4568 }
4569 if (ordered)
4570 btrfs_put_ordered_extent(ordered);
4571
4572 extent_locked = 1;
4573 continue;
4574 }
4575
4576 if (nr_extents == 1) {
4577 /* update extent pointer in place */
4578 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4579 new_extents[0].disk_bytenr);
4580 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4581 new_extents[0].disk_num_bytes);
4582 btrfs_mark_buffer_dirty(leaf);
4583
4584 btrfs_drop_extent_cache(inode, key.offset,
4585 key.offset + num_bytes - 1, 0);
4586
4587 ret = btrfs_inc_extent_ref(trans, root,
4588 new_extents[0].disk_bytenr,
4589 new_extents[0].disk_num_bytes,
4590 leaf->start,
4591 root->root_key.objectid,
4592 trans->transid,
4593 key.objectid);
4594 BUG_ON(ret);
4595
4596 ret = btrfs_free_extent(trans, root,
4597 extent_key->objectid,
4598 extent_key->offset,
4599 leaf->start,
4600 btrfs_header_owner(leaf),
4601 btrfs_header_generation(leaf),
4602 key.objectid, 0);
4603 BUG_ON(ret);
4604
4605 btrfs_release_path(root, path);
4606 key.offset += num_bytes;
4607 } else {
4608			BUG();
4609#if 0
4610 u64 alloc_hint;
4611 u64 extent_len;
4612 int i;
4613 /*
4614 * drop old extent pointer at first, then insert the
4615			 * new pointers one by one
4616 */
4617 btrfs_release_path(root, path);
4618 ret = btrfs_drop_extents(trans, root, inode, key.offset,
4619 key.offset + num_bytes,
4620 key.offset, &alloc_hint);
4621 BUG_ON(ret);
4622
4623 for (i = 0; i < nr_extents; i++) {
4624 if (ext_offset >= new_extents[i].num_bytes) {
4625 ext_offset -= new_extents[i].num_bytes;
4626 continue;
4627 }
4628 extent_len = min(new_extents[i].num_bytes -
4629 ext_offset, num_bytes);
4630
4631 ret = btrfs_insert_empty_item(trans, root,
4632 path, &key,
4633 sizeof(*fi));
4634 BUG_ON(ret);
4635
4636 leaf = path->nodes[0];
4637 fi = btrfs_item_ptr(leaf, path->slots[0],
4638 struct btrfs_file_extent_item);
4639 btrfs_set_file_extent_generation(leaf, fi,
4640 trans->transid);
4641 btrfs_set_file_extent_type(leaf, fi,
4642 BTRFS_FILE_EXTENT_REG);
4643 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4644 new_extents[i].disk_bytenr);
4645 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4646 new_extents[i].disk_num_bytes);
4647 btrfs_set_file_extent_ram_bytes(leaf, fi,
4648 new_extents[i].ram_bytes);
4649
4650 btrfs_set_file_extent_compression(leaf, fi,
4651 new_extents[i].compression);
4652 btrfs_set_file_extent_encryption(leaf, fi,
4653 new_extents[i].encryption);
4654 btrfs_set_file_extent_other_encoding(leaf, fi,
4655 new_extents[i].other_encoding);
4656
4657 btrfs_set_file_extent_num_bytes(leaf, fi,
4658 extent_len);
4659 ext_offset += new_extents[i].offset;
4660 btrfs_set_file_extent_offset(leaf, fi,
4661 ext_offset);
4662 btrfs_mark_buffer_dirty(leaf);
4663
4664 btrfs_drop_extent_cache(inode, key.offset,
4665 key.offset + extent_len - 1, 0);
4666
4667 ret = btrfs_inc_extent_ref(trans, root,
4668 new_extents[i].disk_bytenr,
4669 new_extents[i].disk_num_bytes,
4670 leaf->start,
4671 root->root_key.objectid,
4672 trans->transid, key.objectid);
4673 BUG_ON(ret);
4674 btrfs_release_path(root, path);
4675
4676 inode_add_bytes(inode, extent_len);
4677
4678 ext_offset = 0;
4679 num_bytes -= extent_len;
4680 key.offset += extent_len;
4681
4682 if (num_bytes == 0)
4683 break;
4684 }
4685 BUG_ON(i >= nr_extents);
4686#endif
4687 }
4688
4689 if (extent_locked) {
4690 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4691 lock_end, GFP_NOFS);
4692 extent_locked = 0;
4693 }
4694skip:
4695 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4696 key.offset >= first_pos + extent_key->offset)
4697 break;
4698
4699 cond_resched();
4700 }
4701 ret = 0;
4702out:
4703 btrfs_release_path(root, path);
4704 if (inode) {
4705 mutex_unlock(&inode->i_mutex);
4706 if (extent_locked) {
4707 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4708 lock_end, GFP_NOFS);
4709 }
4710 iput(inode);
4711 }
4712 return ret;
4713}
4714
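/*
 * called when a block is COWed through a reloc tree.  for leaves, the
 * cached leaf ref of the original block is duplicated for the new
 * block so the ref cache stays usable after relocation.
 */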
4715int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4716 struct btrfs_root *root,
4717 struct extent_buffer *buf, u64 orig_start)
4718{
4719 int level;
4720 int ret;
4721
4722 BUG_ON(btrfs_header_generation(buf) != trans->transid);
4723 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4724
4725 level = btrfs_header_level(buf);
4726 if (level == 0) {
4727 struct btrfs_leaf_ref *ref;
4728 struct btrfs_leaf_ref *orig_ref;
4729
4730 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4731 if (!orig_ref)
4732 return -ENOENT;
4733
4734 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4735 if (!ref) {
4736 btrfs_free_leaf_ref(root, orig_ref);
4737 return -ENOMEM;
4738 }
4739
4740 ref->nritems = orig_ref->nritems;
4741 memcpy(ref->extents, orig_ref->extents,
4742 sizeof(ref->extents[0]) * ref->nritems);
4743
4744 btrfs_free_leaf_ref(root, orig_ref);
4745
4746 ref->root_gen = trans->transid;
4747 ref->bytenr = buf->start;
4748 ref->owner = btrfs_header_owner(buf);
4749 ref->generation = btrfs_header_generation(buf);
4750 ret = btrfs_add_leaf_ref(root, ref, 0);
4751 WARN_ON(ret);
4752 btrfs_free_leaf_ref(root, ref);
4753 }
4754 return 0;
4755}
4756
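/*
 * drop the cached extent mappings for all file extents in the given
 * leaf for inodes of the target root, so stale mappings to the old
 * extent locations can't be used after relocation.
 */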
4757static int noinline invalidate_extent_cache(struct btrfs_root *root,
4758 struct extent_buffer *leaf,
4759 struct btrfs_block_group_cache *group,
4760 struct btrfs_root *target_root)
4761{
4762 struct btrfs_key key;
4763 struct inode *inode = NULL;
4764 struct btrfs_file_extent_item *fi;
4765 u64 num_bytes;
4766 u64 skip_objectid = 0;
4767 u32 nritems;
4768 u32 i;
4769
4770 nritems = btrfs_header_nritems(leaf);
4771 for (i = 0; i < nritems; i++) {
4772 btrfs_item_key_to_cpu(leaf, &key, i);
4773 if (key.objectid == skip_objectid ||
4774 key.type != BTRFS_EXTENT_DATA_KEY)
4775 continue;
4776 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4777 if (btrfs_file_extent_type(leaf, fi) ==
4778 BTRFS_FILE_EXTENT_INLINE)
4779 continue;
4780 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4781 continue;
4782 if (!inode || inode->i_ino != key.objectid) {
4783 iput(inode);
4784 inode = btrfs_ilookup(target_root->fs_info->sb,
4785 key.objectid, target_root, 1);
4786 }
4787 if (!inode) {
4788 skip_objectid = key.objectid;
4789 continue;
4790 }
4791 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4792
4793 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4794 key.offset + num_bytes - 1, GFP_NOFS);
4795 btrfs_drop_extent_cache(inode, key.offset,
4796 key.offset + num_bytes - 1, 1);
4797 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4798 key.offset + num_bytes - 1, GFP_NOFS);
4799 cond_resched();
4800 }
4801 iput(inode);
4802 return 0;
4803}
4804
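/*
 * rewrite the file extent items in a reloc tree leaf that point into
 * the block group being relocated, switching them to the new locations
 * recorded in the reloc inode.  the cached leaf ref is updated to
 * match the new extent addresses.
 */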
4805static int noinline replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4806 struct btrfs_root *root,
4807 struct extent_buffer *leaf,
4808 struct btrfs_block_group_cache *group,
4809 struct inode *reloc_inode)
4810{
4811 struct btrfs_key key;
4812 struct btrfs_key extent_key;
4813 struct btrfs_file_extent_item *fi;
4814 struct btrfs_leaf_ref *ref;
4815 struct disk_extent *new_extent;
4816 u64 bytenr;
4817 u64 num_bytes;
4818 u32 nritems;
4819 u32 i;
4820 int ext_index;
4821 int nr_extent;
4822 int ret;
4823
4824 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4825 BUG_ON(!new_extent);
4826
4827 ref = btrfs_lookup_leaf_ref(root, leaf->start);
4828 BUG_ON(!ref);
4829
4830 ext_index = -1;
4831 nritems = btrfs_header_nritems(leaf);
4832 for (i = 0; i < nritems; i++) {
4833 btrfs_item_key_to_cpu(leaf, &key, i);
4834 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4835 continue;
4836 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4837 if (btrfs_file_extent_type(leaf, fi) ==
4838 BTRFS_FILE_EXTENT_INLINE)
4839 continue;
4840 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4841 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4842 if (bytenr == 0)
4843 continue;
4844
4845 ext_index++;
4846 if (bytenr >= group->key.objectid + group->key.offset ||
4847 bytenr + num_bytes <= group->key.objectid)
4848 continue;
4849
4850 extent_key.objectid = bytenr;
4851 extent_key.offset = num_bytes;
4852 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4853 nr_extent = 1;
4854 ret = get_new_locations(reloc_inode, &extent_key,
4855 group->key.objectid, 1,
4856 &new_extent, &nr_extent);
4857 if (ret > 0)
4858 continue;
4859 BUG_ON(ret < 0);
4860
4861 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4862 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4863 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4864 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4865
4866 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4867 new_extent->disk_bytenr);
4868 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4869 new_extent->disk_num_bytes);
4870 btrfs_mark_buffer_dirty(leaf);
4871
4872 ret = btrfs_inc_extent_ref(trans, root,
4873 new_extent->disk_bytenr,
4874 new_extent->disk_num_bytes,
4875 leaf->start,
4876 root->root_key.objectid,
4877 trans->transid, key.objectid);
4878 BUG_ON(ret);
4879 ret = btrfs_free_extent(trans, root,
4880 bytenr, num_bytes, leaf->start,
4881 btrfs_header_owner(leaf),
4882 btrfs_header_generation(leaf),
4883 key.objectid, 0);
4884 BUG_ON(ret);
4885 cond_resched();
4886 }
4887 kfree(new_extent);
4888 BUG_ON(ext_index + 1 != ref->nritems);
4889 btrfs_free_leaf_ref(root, ref);
4890 return 0;
4891}
4892
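/*
 * detach the reloc tree from a subvol root and queue it on the dead
 * reloc roots list.  the root item is updated first so the tree can
 * later be dropped by btrfs_drop_dead_reloc_roots().
 */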
4893int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4894 struct btrfs_root *root)
4895{
4896 struct btrfs_root *reloc_root;
4897 int ret;
4898
4899 if (root->reloc_root) {
4900 reloc_root = root->reloc_root;
4901 root->reloc_root = NULL;
4902 list_add(&reloc_root->dead_list,
4903 &root->fs_info->dead_reloc_roots);
4904
4905 btrfs_set_root_bytenr(&reloc_root->root_item,
4906 reloc_root->node->start);
4907		btrfs_set_root_level(&reloc_root->root_item,
4908 btrfs_header_level(reloc_root->node));
4909 memset(&reloc_root->root_item.drop_progress, 0,
4910 sizeof(struct btrfs_disk_key));
4911 reloc_root->root_item.drop_level = 0;
4912
4913 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4914 &reloc_root->root_key,
4915 &reloc_root->root_item);
4916 BUG_ON(ret);
4917 }
4918 return 0;
4919}
4920
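/*
 * drop every reloc tree on the dead list.  btrfs_drop_snapshot() is
 * restarted whenever it returns -EAGAIN, so a large tree can be freed
 * across several transactions without stalling a commit.
 */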
4921int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4922{
4923 struct btrfs_trans_handle *trans;
4924 struct btrfs_root *reloc_root;
4925 struct btrfs_root *prev_root = NULL;
4926 struct list_head dead_roots;
4927 int ret;
4928 unsigned long nr;
4929
4930 INIT_LIST_HEAD(&dead_roots);
4931 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4932
4933 while (!list_empty(&dead_roots)) {
4934 reloc_root = list_entry(dead_roots.prev,
4935 struct btrfs_root, dead_list);
4936 list_del_init(&reloc_root->dead_list);
4937
4938 BUG_ON(reloc_root->commit_root != NULL);
4939 while (1) {
4940 trans = btrfs_join_transaction(root, 1);
4941 BUG_ON(!trans);
4942
4943 mutex_lock(&root->fs_info->drop_mutex);
4944 ret = btrfs_drop_snapshot(trans, reloc_root);
4945 if (ret != -EAGAIN)
4946 break;
4947 mutex_unlock(&root->fs_info->drop_mutex);
4948
4949 nr = trans->blocks_used;
4950 ret = btrfs_end_transaction(trans, root);
4951 BUG_ON(ret);
4952 btrfs_btree_balance_dirty(root, nr);
4953 }
4954
4955 free_extent_buffer(reloc_root->node);
4956
4957 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4958 &reloc_root->root_key);
4959 BUG_ON(ret);
4960 mutex_unlock(&root->fs_info->drop_mutex);
4961
4962 nr = trans->blocks_used;
4963 ret = btrfs_end_transaction(trans, root);
4964 BUG_ON(ret);
4965 btrfs_btree_balance_dirty(root, nr);
4966
4967 kfree(prev_root);
4968 prev_root = reloc_root;
4969 }
4970 if (prev_root) {
4971 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
4972 kfree(prev_root);
4973 }
4974 return 0;
4975}
4976
4977int btrfs_add_dead_reloc_root(struct btrfs_root *root)
4978{
4979 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
4980 return 0;
4981}
4982
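/*
 * called at mount time to finish dropping reloc trees left over from
 * an interrupted balance and to clean up orphans in the data reloc
 * tree.
 */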
4983int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
4984{
4985 struct btrfs_root *reloc_root;
4986 struct btrfs_trans_handle *trans;
4987 struct btrfs_key location;
4988 int found;
4989 int ret;
4990
4991 mutex_lock(&root->fs_info->tree_reloc_mutex);
4992 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
4993 BUG_ON(ret);
4994 found = !list_empty(&root->fs_info->dead_reloc_roots);
4995 mutex_unlock(&root->fs_info->tree_reloc_mutex);
4996
4997 if (found) {
4998 trans = btrfs_start_transaction(root, 1);
4999 BUG_ON(!trans);
5000 ret = btrfs_commit_transaction(trans, root);
5001 BUG_ON(ret);
5002 }
5003
5004 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5005 location.offset = (u64)-1;
5006 location.type = BTRFS_ROOT_ITEM_KEY;
5007
5008 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5009 BUG_ON(!reloc_root);
5010 btrfs_orphan_cleanup(reloc_root);
5011 return 0;
5012}
5013
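/*
 * create the reloc tree for a subvol if it doesn't exist yet: the
 * committed root is copied, inserted into the tree of tree roots with
 * zero refs, and hooked up at root->reloc_root.
 */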
5014static int noinline init_reloc_tree(struct btrfs_trans_handle *trans,
5015 struct btrfs_root *root)
5016{
5017 struct btrfs_root *reloc_root;
5018 struct extent_buffer *eb;
5019 struct btrfs_root_item *root_item;
5020 struct btrfs_key root_key;
5021 int ret;
5022
5023 BUG_ON(!root->ref_cows);
5024 if (root->reloc_root)
5025 return 0;
5026
5027 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5028 BUG_ON(!root_item);
5029
5030 ret = btrfs_copy_root(trans, root, root->commit_root,
5031 &eb, BTRFS_TREE_RELOC_OBJECTID);
5032 BUG_ON(ret);
5033
5034 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5035 root_key.offset = root->root_key.objectid;
5036 root_key.type = BTRFS_ROOT_ITEM_KEY;
5037
5038	memcpy(root_item, &root->root_item, sizeof(*root_item));
5039 btrfs_set_root_refs(root_item, 0);
5040 btrfs_set_root_bytenr(root_item, eb->start);
5041 btrfs_set_root_level(root_item, btrfs_header_level(eb));
5042 btrfs_set_root_generation(root_item, trans->transid);
5043
5044 btrfs_tree_unlock(eb);
5045 free_extent_buffer(eb);
5046
5047 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5048 &root_key, root_item);
5049 BUG_ON(ret);
5050 kfree(root_item);
5051
5052 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5053 &root_key);
5054 BUG_ON(!reloc_root);
5055 reloc_root->last_trans = trans->transid;
5056 reloc_root->commit_root = NULL;
5057 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5058
5059 root->reloc_root = reloc_root;
5060 return 0;
5061}
5062
5063/*
5064 * Core function of space balance.
5065 *
5066 * The idea is to use reloc trees to relocate tree blocks in reference
5067 * counted roots.  There is one reloc tree for each subvol, and all
5068 * reloc trees share the same root key objectid.  Reloc trees are
5069 * snapshots of the latest committed roots of subvols (root->commit_root).
5070 *
5071 * Relocating a tree block referenced by a subvol takes two steps:
5072 * COW the block through the subvol's reloc tree, then update the block
5073 * pointer in the subvol to point to the new block.  Since all reloc
5074 * trees share the same root key objectid, special handling for tree
5075 * blocks owned by them is easy.  Once a tree block has been COWed in
5076 * one reloc tree, we can use the resulting new block directly when the
5077 * same block needs to be COWed again through another reloc tree.  This
5078 * way, relocated tree blocks are shared between reloc trees, so they
5079 * are also shared between subvols.
5080 */
5081static int noinline relocate_one_path(struct btrfs_trans_handle *trans,
5082 struct btrfs_root *root,
5083 struct btrfs_path *path,
5084 struct btrfs_key *first_key,
5085 struct btrfs_ref_path *ref_path,
5086 struct btrfs_block_group_cache *group,
5087 struct inode *reloc_inode)
5088{
5089 struct btrfs_root *reloc_root;
5090 struct extent_buffer *eb = NULL;
5091 struct btrfs_key *keys;
5092 u64 *nodes;
5093 int level;
5094 int shared_level;
5095 int lowest_level = 0;
5096 int ret;
5097
5098 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5099 lowest_level = ref_path->owner_objectid;
5100
5101 if (!root->ref_cows) {
5102 path->lowest_level = lowest_level;
5103 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5104 BUG_ON(ret < 0);
5105 path->lowest_level = 0;
5106 btrfs_release_path(root, path);
5107 return 0;
5108 }
5109
5110 mutex_lock(&root->fs_info->tree_reloc_mutex);
5111 ret = init_reloc_tree(trans, root);
5112 BUG_ON(ret);
5113 reloc_root = root->reloc_root;
5114
5115 shared_level = ref_path->shared_level;
5116 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5117
5118 keys = ref_path->node_keys;
5119 nodes = ref_path->new_nodes;
5120 memset(&keys[shared_level + 1], 0,
5121 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5122 memset(&nodes[shared_level + 1], 0,
5123 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5124
5125 if (nodes[lowest_level] == 0) {
5126 path->lowest_level = lowest_level;
5127 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5128 0, 1);
5129 BUG_ON(ret);
5130 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5131 eb = path->nodes[level];
5132 if (!eb || eb == reloc_root->node)
5133 break;
5134 nodes[level] = eb->start;
5135 if (level == 0)
5136 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5137 else
5138 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5139 }
5140 if (nodes[0] &&
5141 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5142 eb = path->nodes[0];
5143 ret = replace_extents_in_leaf(trans, reloc_root, eb,
5144 group, reloc_inode);
5145 BUG_ON(ret);
5146 }
5147 btrfs_release_path(reloc_root, path);
5148 } else {
5149 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5150 lowest_level);
5151 BUG_ON(ret);
5152 }
5153
5154 /*
5155 * replace tree blocks in the fs tree with tree blocks in
5156 * the reloc tree.
5157 */
5158 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5159 BUG_ON(ret < 0);
5160
5161 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5162 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5163 0, 0);
5164 BUG_ON(ret);
5165 extent_buffer_get(path->nodes[0]);
5166 eb = path->nodes[0];
5167 btrfs_release_path(reloc_root, path);
5168 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5169 BUG_ON(ret);
5170 free_extent_buffer(eb);
5171 }
5172
5173 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5174 path->lowest_level = 0;
5175 return 0;
5176}
5177
5178static int noinline relocate_tree_block(struct btrfs_trans_handle *trans,
5179 struct btrfs_root *root,
5180 struct btrfs_path *path,
5181 struct btrfs_key *first_key,
5182 struct btrfs_ref_path *ref_path)
5183{
5184 int ret;
5185
5186 ret = relocate_one_path(trans, root, path, first_key,
5187 ref_path, NULL, NULL);
5188 BUG_ON(ret);
5189
5190 if (root == root->fs_info->extent_root)
5191 btrfs_extent_post_op(trans, root);
5192
5193 return 0;
5194}
5195
5196static int noinline del_extent_zero(struct btrfs_trans_handle *trans,
5197 struct btrfs_root *extent_root,
5198 struct btrfs_path *path,
5199 struct btrfs_key *extent_key)
5200{
5201 int ret;
5202
5203 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5204 if (ret)
5205 goto out;
5206 ret = btrfs_del_item(trans, extent_root, path);
5207out:
5208 btrfs_release_path(extent_root, path);
5209 return ret;
5210}
5211
5212static struct btrfs_root noinline *read_ref_root(struct btrfs_fs_info *fs_info,
5213 struct btrfs_ref_path *ref_path)
5214{
5215 struct btrfs_key root_key;
5216
5217 root_key.objectid = ref_path->root_objectid;
5218 root_key.type = BTRFS_ROOT_ITEM_KEY;
5219 if (is_cowonly_root(ref_path->root_objectid))
5220 root_key.offset = 0;
5221 else
5222 root_key.offset = (u64)-1;
5223
5224 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5225}
5226
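/*
 * relocate all references to one extent.  each reference path is
 * handled according to its owner: data extents are copied into the
 * reloc inode on pass 0, tree blocks are relocated through the reloc
 * trees, and data references still left on pass 2 and later are
 * rewritten in place by replace_one_extent().
 */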
5227static int noinline relocate_one_extent(struct btrfs_root *extent_root,
5228 struct btrfs_path *path,
5229 struct btrfs_key *extent_key,
5230 struct btrfs_block_group_cache *group,
5231 struct inode *reloc_inode, int pass)
5232{
5233 struct btrfs_trans_handle *trans;
5234 struct btrfs_root *found_root;
5235 struct btrfs_ref_path *ref_path = NULL;
5236 struct disk_extent *new_extents = NULL;
5237 int nr_extents = 0;
5238 int loops;
5239 int ret;
5240 int level;
5241 struct btrfs_key first_key;
5242 u64 prev_block = 0;
5243
5244
5245 trans = btrfs_start_transaction(extent_root, 1);
5246 BUG_ON(!trans);
5247
5248 if (extent_key->objectid == 0) {
5249 ret = del_extent_zero(trans, extent_root, path, extent_key);
5250 goto out;
5251 }
5252
5253 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5254 if (!ref_path) {
5255 ret = -ENOMEM;
5256 goto out;
5257 }
5258
5259 for (loops = 0; ; loops++) {
5260 if (loops == 0) {
5261 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5262 extent_key->objectid);
5263 } else {
5264 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5265 }
5266 if (ret < 0)
5267 goto out;
5268 if (ret > 0)
5269 break;
5270
5271 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5272 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5273 continue;
5274
5275 found_root = read_ref_root(extent_root->fs_info, ref_path);
5276 BUG_ON(!found_root);
5277 /*
5278		 * for reference counted trees, only process reference paths
5279 * rooted at the latest committed root.
5280 */
5281 if (found_root->ref_cows &&
5282 ref_path->root_generation != found_root->root_key.offset)
5283 continue;
5284
5285 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5286 if (pass == 0) {
5287 /*
5288 * copy data extents to new locations
5289 */
5290 u64 group_start = group->key.objectid;
5291 ret = relocate_data_extent(reloc_inode,
5292 extent_key,
5293 group_start);
5294 if (ret < 0)
5295 goto out;
5296 break;
5297 }
5298 level = 0;
5299 } else {
5300 level = ref_path->owner_objectid;
5301 }
5302
5303 if (prev_block != ref_path->nodes[level]) {
5304 struct extent_buffer *eb;
5305 u64 block_start = ref_path->nodes[level];
5306 u64 block_size = btrfs_level_size(found_root, level);
5307
5308 eb = read_tree_block(found_root, block_start,
5309 block_size, 0);
5310 btrfs_tree_lock(eb);
5311 BUG_ON(level != btrfs_header_level(eb));
5312
5313 if (level == 0)
5314 btrfs_item_key_to_cpu(eb, &first_key, 0);
5315 else
5316 btrfs_node_key_to_cpu(eb, &first_key, 0);
5317
5318 btrfs_tree_unlock(eb);
5319 free_extent_buffer(eb);
5320 prev_block = block_start;
5321 }
5322
5323 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
5324 pass >= 2) {
5325 /*
5326			 * use the fallback method to process the remaining
5327 * references.
5328 */
5329 if (!new_extents) {
5330 u64 group_start = group->key.objectid;
5331 new_extents = kmalloc(sizeof(*new_extents),
5332 GFP_NOFS);
5333 nr_extents = 1;
5334 ret = get_new_locations(reloc_inode,
5335 extent_key,
5336 group_start, 1,
5337 &new_extents,
5338 &nr_extents);
5339 if (ret)
5340 goto out;
5341 }
5342 btrfs_record_root_in_trans(found_root);
5343 ret = replace_one_extent(trans, found_root,
5344 path, extent_key,
5345 &first_key, ref_path,
5346 new_extents, nr_extents);
5347 if (ret < 0)
5348 goto out;
5349 continue;
5350 }
5351
5352 btrfs_record_root_in_trans(found_root);
5353 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5354 ret = relocate_tree_block(trans, found_root, path,
5355 &first_key, ref_path);
5356 } else {
5357 /*
5358 * try to update data extent references while
5359 * keeping metadata shared between snapshots.
5360 */
5361 ret = relocate_one_path(trans, found_root, path,
5362 &first_key, ref_path,
5363 group, reloc_inode);
5364 }
5365 if (ret < 0)
5366 goto out;
5367 }
5368 ret = 0;
5369out:
5370 btrfs_end_transaction(trans, extent_root);
5371 kfree(new_extents);
5372 kfree(ref_path);
5373 return ret;
5374}
5375
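/*
 * pick the raid profile for chunks allocated while relocating this
 * one: raid0/raid1/raid10 degrade to single/dup when only one writable
 * device is left, and dup/single upgrade to raid1/raid0 when more
 * devices are available.
 */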
5376static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5377{
5378 u64 num_devices;
5379 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5380 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5381
5382 num_devices = root->fs_info->fs_devices->rw_devices;
5383 if (num_devices == 1) {
5384 stripped |= BTRFS_BLOCK_GROUP_DUP;
5385 stripped = flags & ~stripped;
5386
5387 /* turn raid0 into single device chunks */
5388 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5389 return stripped;
5390
5391 /* turn mirroring into duplication */
5392 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5393 BTRFS_BLOCK_GROUP_RAID10))
5394 return stripped | BTRFS_BLOCK_GROUP_DUP;
5395 return flags;
5396 } else {
5397 /* they already had raid on here, just return */
5398 if (flags & stripped)
5399 return flags;
5400
5401 stripped |= BTRFS_BLOCK_GROUP_DUP;
5402 stripped = flags & ~stripped;
5403
5404 /* switch duplicated blocks with raid1 */
5405 if (flags & BTRFS_BLOCK_GROUP_DUP)
5406 return stripped | BTRFS_BLOCK_GROUP_RAID1;
5407
5408 /* turn single device chunks into raid0 */
5409 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5410 }
5412}
5413
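/*
 * if the shrinking block group still has used space, pre-allocate a
 * chunk with the profile chosen by update_block_group_flags() so the
 * relocated data has somewhere to go.
 */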
5414int __alloc_chunk_for_shrink(struct btrfs_root *root,
5415 struct btrfs_block_group_cache *shrink_block_group,
5416 int force)
5417{
5418 struct btrfs_trans_handle *trans;
5419 u64 new_alloc_flags;
5420 u64 calc;
5421
5422 spin_lock(&shrink_block_group->lock);
5423 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5424 spin_unlock(&shrink_block_group->lock);
5425
5426 trans = btrfs_start_transaction(root, 1);
5427 spin_lock(&shrink_block_group->lock);
5428
5429 new_alloc_flags = update_block_group_flags(root,
5430 shrink_block_group->flags);
5431 if (new_alloc_flags != shrink_block_group->flags) {
5432 calc =
5433 btrfs_block_group_used(&shrink_block_group->item);
5434 } else {
5435 calc = shrink_block_group->key.offset;
5436 }
5437 spin_unlock(&shrink_block_group->lock);
5438
5439 do_chunk_alloc(trans, root->fs_info->extent_root,
5440 calc + 2 * 1024 * 1024, new_alloc_flags, force);
5441
5442 btrfs_end_transaction(trans, root);
5443 } else
5444 spin_unlock(&shrink_block_group->lock);
5445 return 0;
5446}
5447
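/*
 * insert a bare inode item for the reloc inode: a regular file of the
 * given size with data checksums and compression disabled.
 */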
5448static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5449 struct btrfs_root *root,
5450 u64 objectid, u64 size)
5451{
5452 struct btrfs_path *path;
5453 struct btrfs_inode_item *item;
5454 struct extent_buffer *leaf;
5455 int ret;
5456
5457 path = btrfs_alloc_path();
5458 if (!path)
5459 return -ENOMEM;
5460
5461 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5462 if (ret)
5463 goto out;
5464
5465 leaf = path->nodes[0];
5466 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5467 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5468 btrfs_set_inode_generation(leaf, item, 1);
5469 btrfs_set_inode_size(leaf, item, size);
5470 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5471 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM |
5472 BTRFS_INODE_NOCOMPRESS);
5473 btrfs_mark_buffer_dirty(leaf);
5474 btrfs_release_path(root, path);
5475out:
5476 btrfs_free_path(path);
5477 return ret;
5478}
5479
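/*
 * create an inode in the data reloc tree whose single file extent item
 * (with disk_bytenr 0) covers the size of the block group.  the inode
 * is put on the orphan list, so it is deleted on the final iput().
 */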
5480static struct inode noinline *create_reloc_inode(struct btrfs_fs_info *fs_info,
5481 struct btrfs_block_group_cache *group)
5482{
5483 struct inode *inode = NULL;
5484 struct btrfs_trans_handle *trans;
5485 struct btrfs_root *root;
5486 struct btrfs_key root_key;
5487 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5488 int err = 0;
5489
5490 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5491 root_key.type = BTRFS_ROOT_ITEM_KEY;
5492 root_key.offset = (u64)-1;
5493 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5494 if (IS_ERR(root))
5495 return ERR_CAST(root);
5496
5497 trans = btrfs_start_transaction(root, 1);
5498 BUG_ON(!trans);
5499
5500 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5501 if (err)
5502 goto out;
5503
5504 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5505 BUG_ON(err);
5506
5507 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5508 group->key.offset, 0, group->key.offset,
5509 0, 0, 0);
5510 BUG_ON(err);
5511
5512	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	BUG_ON(!inode);
5513	if (inode->i_state & I_NEW) {
5514 BTRFS_I(inode)->root = root;
5515 BTRFS_I(inode)->location.objectid = objectid;
5516 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5517 BTRFS_I(inode)->location.offset = 0;
5518 btrfs_read_locked_inode(inode);
5519 unlock_new_inode(inode);
5520 BUG_ON(is_bad_inode(inode));
5521 } else {
5522		BUG();
5523 }
5524
5525 err = btrfs_orphan_add(trans, inode);
5526out:
5527 btrfs_end_transaction(trans, root);
5528 if (err) {
5529 if (inode)
5530 iput(inode);
5531 inode = ERR_PTR(err);
5532 }
5533 return inode;
5534}
5535
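/*
 * move everything out of a block group.  the group is made read-only,
 * a reloc inode is created to hold the data, and the extent tree is
 * scanned in passes, relocating extents until a pass finds nothing
 * left to move.
 */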
5536int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5537{
5538 struct btrfs_trans_handle *trans;
5539 struct btrfs_path *path;
5540 struct btrfs_fs_info *info = root->fs_info;
5541 struct extent_buffer *leaf;
5542 struct inode *reloc_inode;
5543 struct btrfs_block_group_cache *block_group;
5544 struct btrfs_key key;
5545 u64 skipped;
5546 u64 cur_byte;
5547 u64 total_found;
5548 u32 nritems;
5549 int ret;
5550 int progress;
5551 int pass = 0;
5552
5553 root = root->fs_info->extent_root;
5554
5555 block_group = btrfs_lookup_block_group(info, group_start);
5556 BUG_ON(!block_group);
5557
5558	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5559 (unsigned long long)block_group->key.objectid,
5560 (unsigned long long)block_group->flags);
5561
5562 path = btrfs_alloc_path();
5563 BUG_ON(!path);
5564
5565 reloc_inode = create_reloc_inode(info, block_group);
5566 BUG_ON(IS_ERR(reloc_inode));
5567
5568 __alloc_chunk_for_shrink(root, block_group, 1);
5569 set_block_group_readonly(block_group);
5570
5571 btrfs_start_delalloc_inodes(info->tree_root);
5572 btrfs_wait_ordered_extents(info->tree_root, 0);
5573again:
5574 skipped = 0;
5575 total_found = 0;
5576 progress = 0;
5577 key.objectid = block_group->key.objectid;
5578 key.offset = 0;
5579 key.type = 0;
5580 cur_byte = key.objectid;
5581
5582 trans = btrfs_start_transaction(info->tree_root, 1);
5583 btrfs_commit_transaction(trans, info->tree_root);
5584
5585 mutex_lock(&root->fs_info->cleaner_mutex);
5586 btrfs_clean_old_snapshots(info->tree_root);
5587 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5588 mutex_unlock(&root->fs_info->cleaner_mutex);
5589
5590	while (1) {
5591 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5592 if (ret < 0)
5593 goto out;
5594next:
5595 leaf = path->nodes[0];
5596 nritems = btrfs_header_nritems(leaf);
5597 if (path->slots[0] >= nritems) {
5598 ret = btrfs_next_leaf(root, path);
5599 if (ret < 0)
5600 goto out;
5601 if (ret == 1) {
5602 ret = 0;
5603 break;
5604 }
5605 leaf = path->nodes[0];
5606 nritems = btrfs_header_nritems(leaf);
5607 }
5608
5609 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5610
5611 if (key.objectid >= block_group->key.objectid +
5612 block_group->key.offset)
5613 break;
5614
5615 if (progress && need_resched()) {
5616 btrfs_release_path(root, path);
5617 cond_resched();
5618 progress = 0;
5619 continue;
5620 }
5621 progress = 1;
5622
5623 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5624 key.objectid + key.offset <= cur_byte) {
5625 path->slots[0]++;
5626 goto next;
5627 }
5628
5629 total_found++;
5630 cur_byte = key.objectid + key.offset;
5631 btrfs_release_path(root, path);
5632
5633 __alloc_chunk_for_shrink(root, block_group, 0);
5634 ret = relocate_one_extent(root, path, &key, block_group,
5635 reloc_inode, pass);
5636 BUG_ON(ret < 0);
5637 if (ret > 0)
5638 skipped++;
5639
5640 key.objectid = cur_byte;
5641 key.type = 0;
5642 key.offset = 0;
5643 }
5644
5645 btrfs_release_path(root, path);
5646
5647 if (pass == 0) {
5648 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
5649 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
5650 WARN_ON(reloc_inode->i_mapping->nrpages);
5651 }
5652
5653 if (total_found > 0) {
5654		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5655 (unsigned long long)total_found, pass);
5656 pass++;
5657 if (total_found == skipped && pass > 2) {
5658 iput(reloc_inode);
5659 reloc_inode = create_reloc_inode(info, block_group);
5660 pass = 0;
5661 }
5662 goto again;
5663 }
5664
5665 /* delete reloc_inode */
5666 iput(reloc_inode);
5667
5668 /* unpin extents in this range */
5669 trans = btrfs_start_transaction(info->tree_root, 1);
5670 btrfs_commit_transaction(trans, info->tree_root);
5671
5672 spin_lock(&block_group->lock);
5673 WARN_ON(block_group->pinned > 0);
5674 WARN_ON(block_group->reserved > 0);
5675 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
5676 spin_unlock(&block_group->lock);
5677 ret = 0;
5678out:
5679 btrfs_free_path(path);
5680 return ret;
5681}
5682
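/*
 * find the first block group item with objectid >= key->objectid.
 * returns 0 with the path pointing at the item, or -ENOENT if there
 * is none.
 */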
5683int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
5684 struct btrfs_key *key)
5685{
5686 int ret = 0;
5687 struct btrfs_key found_key;
5688 struct extent_buffer *leaf;
5689 int slot;
5690
5691 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
5692 if (ret < 0)
5693 goto out;
5694
5695	while (1) {
5696 slot = path->slots[0];
5697 leaf = path->nodes[0];
5698 if (slot >= btrfs_header_nritems(leaf)) {
5699 ret = btrfs_next_leaf(root, path);
5700 if (ret == 0)
5701 continue;
5702 if (ret < 0)
5703 goto out;
5704 break;
5705 }
5706 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5707
5708 if (found_key.objectid >= key->objectid &&
5709 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5710 ret = 0;
5711 goto out;
5712 }
5713 path->slots[0]++;
5714 }
5715 ret = -ENOENT;
5716out:
5717 return ret;
5718}
5719
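/*
 * free all in-memory block group caches; called when the filesystem
 * is being torn down.
 */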
5720int btrfs_free_block_groups(struct btrfs_fs_info *info)
5721{
5722 struct btrfs_block_group_cache *block_group;
5723 struct rb_node *n;
5724
5725 spin_lock(&info->block_group_cache_lock);
5726 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5727 block_group = rb_entry(n, struct btrfs_block_group_cache,
5728 cache_node);
5729 rb_erase(&block_group->cache_node,
5730 &info->block_group_cache_tree);
5731 spin_unlock(&info->block_group_cache_lock);
5732
5733 btrfs_remove_free_space_cache(block_group);
5734 down_write(&block_group->space_info->groups_sem);
5735 list_del(&block_group->list);
5736 up_write(&block_group->space_info->groups_sem);
5737 kfree(block_group);
5738
5739 spin_lock(&info->block_group_cache_lock);
5740 }
5741 spin_unlock(&info->block_group_cache_lock);
5742 return 0;
5743}
5744
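/*
 * read all block group items from the extent tree at mount time and
 * build the in-memory block group caches and space_info accounting.
 */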
5745int btrfs_read_block_groups(struct btrfs_root *root)
5746{
5747 struct btrfs_path *path;
5748 int ret;
5749 struct btrfs_block_group_cache *cache;
5750 struct btrfs_fs_info *info = root->fs_info;
5751 struct btrfs_space_info *space_info;
5752 struct btrfs_key key;
5753 struct btrfs_key found_key;
5754 struct extent_buffer *leaf;
5755
5756 root = info->extent_root;
5757 key.objectid = 0;
5758 key.offset = 0;
5759 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5760 path = btrfs_alloc_path();
5761 if (!path)
5762 return -ENOMEM;
5763
5764	while (1) {
5765 ret = find_first_block_group(root, path, &key);
5766 if (ret > 0) {
5767 ret = 0;
5768 goto error;
5769 }
5770 if (ret != 0)
5771 goto error;
5772
5773 leaf = path->nodes[0];
5774 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5775 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5776 if (!cache) {
5777 ret = -ENOMEM;
5778 break;
5779 }
5780
5781 spin_lock_init(&cache->lock);
5782 mutex_init(&cache->alloc_mutex);
5783 INIT_LIST_HEAD(&cache->list);
5784 read_extent_buffer(leaf, &cache->item,
5785 btrfs_item_ptr_offset(leaf, path->slots[0]),
5786 sizeof(cache->item));
5787 memcpy(&cache->key, &found_key, sizeof(found_key));
5788
5789 key.objectid = found_key.objectid + found_key.offset;
5790 btrfs_release_path(root, path);
5791 cache->flags = btrfs_block_group_flags(&cache->item);
5792
5793 ret = update_space_info(info, cache->flags, found_key.offset,
5794 btrfs_block_group_used(&cache->item),
5795 &space_info);
5796 BUG_ON(ret);
5797 cache->space_info = space_info;
5798 down_write(&space_info->groups_sem);
5799 list_add_tail(&cache->list, &space_info->block_groups);
5800 up_write(&space_info->groups_sem);
5801
5802 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5803 BUG_ON(ret);
5804
5805 set_avail_alloc_bits(root->fs_info, cache->flags);
5806 if (btrfs_chunk_readonly(root, cache->key.objectid))
5807 set_block_group_readonly(cache);
5808 }
5809 ret = 0;
5810error:
5811 btrfs_free_path(path);
5812 return ret;
5813}
5814
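/*
 * create the in-memory cache and the on-disk block group item for a
 * newly allocated chunk.
 */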
5815int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5816 struct btrfs_root *root, u64 bytes_used,
5817 u64 type, u64 chunk_objectid, u64 chunk_offset,
5818 u64 size)
5819{
5820 int ret;
5821 struct btrfs_root *extent_root;
5822 struct btrfs_block_group_cache *cache;
5823
5824 extent_root = root->fs_info->extent_root;
5825
5826 root->fs_info->last_trans_new_blockgroup = trans->transid;
5827
5828 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5829 if (!cache)
5830 return -ENOMEM;
5831
5832 cache->key.objectid = chunk_offset;
5833 cache->key.offset = size;
5834 spin_lock_init(&cache->lock);
5835 mutex_init(&cache->alloc_mutex);
5836 INIT_LIST_HEAD(&cache->list);
5837 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5838
5839 btrfs_set_block_group_used(&cache->item, bytes_used);
5840 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5841 cache->flags = type;
5842 btrfs_set_block_group_flags(&cache->item, type);
5843
5844 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5845 &cache->space_info);
5846 BUG_ON(ret);
5847 down_write(&cache->space_info->groups_sem);
5848 list_add_tail(&cache->list, &cache->space_info->block_groups);
5849 up_write(&cache->space_info->groups_sem);
5850
5851 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5852 BUG_ON(ret);
5853
5854 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5855 sizeof(cache->item));
5856 BUG_ON(ret);
5857
5858 finish_current_insert(trans, extent_root, 0);
5859 ret = del_pending_extents(trans, extent_root, 0);
5860 BUG_ON(ret);
5861 set_avail_alloc_bits(extent_root->fs_info, type);
5862
5863 return 0;
5864}
5865
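/*
 * remove an empty, read-only block group: drop its free space cache,
 * unlink it from the block group tree and its space_info, and delete
 * the block group item from the extent tree.
 */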
5866int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5867 struct btrfs_root *root, u64 group_start)
5868{
5869 struct btrfs_path *path;
5870 struct btrfs_block_group_cache *block_group;
5871 struct btrfs_key key;
5872 int ret;
5873
5874 root = root->fs_info->extent_root;
5875
5876 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
5877 BUG_ON(!block_group);
5878 BUG_ON(!block_group->ro);
5879
5880 memcpy(&key, &block_group->key, sizeof(key));
5881
5882 path = btrfs_alloc_path();
5883 BUG_ON(!path);
5884
5885 btrfs_remove_free_space_cache(block_group);
5886 rb_erase(&block_group->cache_node,
5887 &root->fs_info->block_group_cache_tree);
5888 down_write(&block_group->space_info->groups_sem);
5889 list_del(&block_group->list);
5890 up_write(&block_group->space_info->groups_sem);
5891
5892 spin_lock(&block_group->space_info->lock);
5893 block_group->space_info->total_bytes -= block_group->key.offset;
5894 block_group->space_info->bytes_readonly -= block_group->key.offset;
5895 spin_unlock(&block_group->space_info->lock);
5896 block_group->space_info->full = 0;
5902
5903 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5904 if (ret > 0)
5905 ret = -EIO;
5906 if (ret < 0)
5907 goto out;
5908
5909 ret = btrfs_del_item(trans, root, path);
5910out:
5911 btrfs_free_path(path);
5912 return ret;
5913}