Btrfs: mnt_drop_write in ioctl_trans_end
[linux-2.6-block.git] / fs / btrfs / extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/version.h>
#include "compat.h"
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"

#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2

struct pending_extent_op {
	int type;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 orig_parent;
	u64 generation;
	u64 orig_generation;
	int level;
	struct list_head list;
	int del;
};

static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root, int all);
static int del_pending_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root, int all);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static int add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size;
	int ret;

	mutex_lock(&info->pinned_mutex);
	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}
	mutex_unlock(&info->pinned_mutex);

	return 0;
}

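/*
 * superblock mirrors live inside block groups but must never be handed
 * out as free space, so carve their stripes back out of the free space
 * cache for this block group
 */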
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			btrfs_remove_free_space(cache, logical[nr],
						stripe_len);
		}
		kfree(logical);
	}
	return 0;
}

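/*
 * walk the extent tree for the range covered by a block group and record
 * every hole between allocated extents in the free space cache; once that
 * is done the group is marked cached
 */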
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
{
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 last;

	/* check for NULL before touching block_group->key */
	if (!block_group)
		return 0;

	last = block_group->key.objectid;
	root = root->fs_info->extent_root;

	if (block_group->cached)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 2;
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here
	 */
	path->skip_locking = 1;
	key.objectid = max_t(u64, last, BTRFS_SUPER_INFO_OFFSET);
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			if (ret == 0)
				continue;
			else
				break;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			add_new_free_space(block_group, root->fs_info, last,
					   key.objectid);

			last = key.objectid + key.offset;
		}
next:
		path->slots[0]++;
	}

	add_new_free_space(block_group, root->fs_info, last,
			   block_group->key.objectid +
			   block_group->key.offset);

	remove_sb_from_cache(root, block_group);
	block_group->cached = 1;
	ret = 0;
err:
	btrfs_free_path(path);
	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						struct btrfs_fs_info *info,
						u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct list_head *cur;
	struct btrfs_space_info *found;
	list_for_each(cur, head) {
		found = list_entry(cur, struct btrfs_space_info, list);
		if (found->flags == flags)
			return found;
	}
	return NULL;
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 free_check;
	int full_search = 0;
	int factor = 10;
	int wrapped = 0;

	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data)) {
			spin_lock(&shint->lock);
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned + shint->reserved <
			    div_factor(shint->key.offset, factor)) {
				spin_unlock(&shint->lock);
				return shint;
			}
			spin_unlock(&shint->lock);
		}
	}
	if (hint && block_group_bits(hint, data)) {
		spin_lock(&hint->lock);
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned + hint->reserved <
		    div_factor(hint->key.offset, factor)) {
			spin_unlock(&hint->lock);
			return hint;
		}
		spin_unlock(&hint->lock);
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned + cache->reserved <
			    free_check) {
				found_group = cache;
				spin_unlock(&cache->lock);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
						struct btrfs_block_group_cache
						*hint, u64 search_start,
						int data, int owner)
{
	struct btrfs_block_group_cache *ret;
	ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
	return ret;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent.  In most
 * cases, these references are from the same file and the corresponding
 * offsets inside the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to.  If the tree block isn't in a reference counted root,
 * the old back references are removed.  These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 * else
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent.  If an extent is a tree root, the key offset
 * is set to the key objectid.
 */

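/*
 * A hypothetical example of the key composition described above: a data
 * extent starting at byte 136314880 that is referenced from a leaf whose
 * first byte is 137363456 would carry the backref key
 *
 *     (136314880, BTRFS_EXTENT_REF_KEY, 137363456)
 *
 * while a tree root at byte 136314880 would carry
 *
 *     (136314880, BTRFS_EXTENT_REF_KEY, 136314880)
 *
 * (the byte numbers here are made up purely for illustration).
 */

/*
 * look up the backref item for an extent: search for
 * (bytenr, BTRFS_EXTENT_REF_KEY, parent) and verify that the ref's root,
 * generation and owner match what the caller expects.  When del is set
 * the path is cowed and left ready for deletion of the item.
 */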
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid, int del)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct extent_buffer *leaf;
	u64 ref_objectid;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	ref_objectid = btrfs_ref_objectid(leaf, ref);
	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != ref_generation ||
	    (ref_objectid != owner_objectid &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		ret = -EIO;
		WARN_ON(1);
		goto out;
	}
	ret = 0;
out:
	return ret;
}

/*
 * updates all the backrefs that are pending on update_list for the
 * extent_root
 */
static int noinline update_backrefs(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct list_head *update_list)
{
	struct btrfs_key key;
	struct btrfs_extent_ref *ref;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	int ret = 0;
	struct list_head *cur = update_list->next;
	u64 ref_objectid;
	u64 ref_root = extent_root->root_key.objectid;

	op = list_entry(cur, struct pending_extent_op, list);

search:
	key.objectid = op->bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = op->orig_parent;

	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
	BUG_ON(ret);

	leaf = path->nodes[0];

loop:
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);

	ref_objectid = btrfs_ref_objectid(leaf, ref);

	if (btrfs_ref_root(leaf, ref) != ref_root ||
	    btrfs_ref_generation(leaf, ref) != op->orig_generation ||
	    (ref_objectid != op->level &&
	     ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
		printk(KERN_ERR "couldn't find %Lu, parent %Lu, root %Lu, "
		       "owner %u\n", op->bytenr, op->orig_parent,
		       ref_root, op->level);
		btrfs_print_leaf(extent_root, leaf);
		BUG();
	}

	key.objectid = op->bytenr;
	key.offset = op->parent;
	key.type = BTRFS_EXTENT_REF_KEY;
	ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
	BUG_ON(ret);
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	btrfs_set_ref_generation(leaf, ref, op->generation);

	cur = cur->next;

	list_del_init(&op->list);
	unlock_extent(&info->extent_ins, op->bytenr,
		      op->bytenr + op->num_bytes - 1, GFP_NOFS);
	kfree(op);

	if (cur == update_list) {
		btrfs_mark_buffer_dirty(path->nodes[0]);
		btrfs_release_path(extent_root, path);
		goto out;
	}

	op = list_entry(cur, struct pending_extent_op, list);

	path->slots[0]++;
	while (path->slots[0] < btrfs_header_nritems(leaf)) {
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid == op->bytenr &&
		    key.type == BTRFS_EXTENT_REF_KEY)
			goto loop;
		path->slots[0]++;
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(extent_root, path);
	goto search;

out:
	return 0;
}

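/*
 * batch-insert the pending extent items and their backrefs from
 * insert_list into the extent tree; nr is the number of extents, so
 * 2 * nr keys get inserted.  When a run ends at a leaf boundary the last
 * list entry is inserted out of order to keep the batches large.
 */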
static int noinline insert_extents(struct btrfs_trans_handle *trans,
				   struct btrfs_root *extent_root,
				   struct btrfs_path *path,
				   struct list_head *insert_list, int nr)
{
	struct btrfs_key *keys;
	u32 *data_size;
	struct pending_extent_op *op;
	struct extent_buffer *leaf;
	struct list_head *cur = insert_list->next;
	struct btrfs_fs_info *info = extent_root->fs_info;
	u64 ref_root = extent_root->root_key.objectid;
	int i = 0, last = 0, ret;
	int total = nr * 2;

	if (!nr)
		return 0;

	keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys)
		return -ENOMEM;

	data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
	if (!data_size) {
		kfree(keys);
		return -ENOMEM;
	}

	list_for_each_entry(op, insert_list, list) {
		keys[i].objectid = op->bytenr;
		keys[i].offset = op->num_bytes;
		keys[i].type = BTRFS_EXTENT_ITEM_KEY;
		data_size[i] = sizeof(struct btrfs_extent_item);
		i++;

		keys[i].objectid = op->bytenr;
		keys[i].offset = op->parent;
		keys[i].type = BTRFS_EXTENT_REF_KEY;
		data_size[i] = sizeof(struct btrfs_extent_ref);
		i++;
	}

	op = list_entry(cur, struct pending_extent_op, list);
	i = 0;
	while (i < total) {
		int c;
		ret = btrfs_insert_some_items(trans, extent_root, path,
					      keys+i, data_size+i, total-i);
		BUG_ON(ret < 0);

		if (last && ret > 1)
			BUG();

		leaf = path->nodes[0];
		for (c = 0; c < ret; c++) {
			int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;

			/*
			 * if the first item we inserted was a backref, then
			 * the EXTENT_ITEM will be the odd c's, else it will
			 * be the even c's
			 */
			if ((ref_first && (c % 2)) ||
			    (!ref_first && !(c % 2))) {
				struct btrfs_extent_item *itm;

				itm = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_item);
				btrfs_set_extent_refs(path->nodes[0], itm, 1);
				op->del++;
			} else {
				struct btrfs_extent_ref *ref;

				ref = btrfs_item_ptr(leaf, path->slots[0] + c,
						     struct btrfs_extent_ref);
				btrfs_set_ref_root(leaf, ref, ref_root);
				btrfs_set_ref_generation(leaf, ref,
							 op->generation);
				btrfs_set_ref_objectid(leaf, ref, op->level);
				btrfs_set_ref_num_refs(leaf, ref, 1);
				op->del++;
			}

			/*
			 * using del to see when it's ok to free up the
			 * pending_extent_op.  In the case where we insert the
			 * last item on the list in order to help do batching
			 * we need to not free the extent op until we actually
			 * insert the extent_item
			 */
			if (op->del == 2) {
				unlock_extent(&info->extent_ins, op->bytenr,
					      op->bytenr + op->num_bytes - 1,
					      GFP_NOFS);
				cur = cur->next;
				list_del_init(&op->list);
				kfree(op);
				if (cur != insert_list)
					op = list_entry(cur,
						struct pending_extent_op,
						list);
			}
		}
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(extent_root, path);

		/*
		 * Ok backrefs and items usually go right next to each other,
		 * but if we could only insert 1 item that means that we
		 * inserted on the end of a leaf, and we have no idea what may
		 * be on the next leaf so we just play it safe.  In order to
		 * try and help this case we insert the last thing on our
		 * insert list so hopefully it will end up being the last
		 * thing on the leaf and everything else will be before it,
		 * which will let us insert a whole bunch of items at the same
		 * time.
		 */
		if (ret == 1 && !last && (i + ret < total)) {
			/*
			 * last: where we will pick up the next time around
			 * i: our current key to insert, will be total - 1
			 * cur: the current op we are screwing with
			 * op: duh
			 */
			last = i + ret;
			i = total - 1;
			cur = insert_list->prev;
			op = list_entry(cur, struct pending_extent_op, list);
		} else if (last) {
			/*
			 * ok we successfully inserted the last item on the
			 * list, lets reset everything
			 *
			 * i: our current key to insert, so where we left off
			 *    last time
			 * last: done with this
			 * cur: the op we are messing with
			 * op: duh
			 * total: since we inserted the last key, we need to
			 *        decrement total so we don't overflow
			 */
			i = last;
			last = 0;
			total--;
			if (i < total) {
				cur = insert_list->next;
				op = list_entry(cur, struct pending_extent_op,
						list);
			}
		} else {
			i += ret;
		}

		cond_resched();
	}
	ret = 0;
	kfree(keys);
	kfree(data_size);
	return ret;
}

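/*
 * insert a backref item for (bytenr, parent).  If one already exists its
 * ref count is bumped instead, and the owner is widened to
 * BTRFS_MULTIPLE_OBJECTIDS when the existing ref belongs to a different
 * inode.
 */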
static int noinline insert_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 ref_root, u64 ref_generation,
					  u64 owner_objectid)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;
	key.offset = parent;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
	if (ret == 0) {
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		btrfs_set_ref_root(leaf, ref, ref_root);
		btrfs_set_ref_generation(leaf, ref, ref_generation);
		btrfs_set_ref_objectid(leaf, ref, owner_objectid);
		btrfs_set_ref_num_refs(leaf, ref, 1);
	} else if (ret == -EEXIST) {
		u64 existing_owner;
		BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
		leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		if (btrfs_ref_root(leaf, ref) != ref_root ||
		    btrfs_ref_generation(leaf, ref) != ref_generation) {
			ret = -EIO;
			WARN_ON(1);
			goto out;
		}

		num_refs = btrfs_ref_num_refs(leaf, ref);
		BUG_ON(num_refs == 0);
		btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);

		existing_owner = btrfs_ref_objectid(leaf, ref);
		if (existing_owner != owner_objectid &&
		    existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
			btrfs_set_ref_objectid(leaf, ref,
					       BTRFS_MULTIPLE_OBJECTIDS);
		}
		ret = 0;
	} else {
		goto out;
	}
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}

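/*
 * drop one reference from the backref item the path currently points at,
 * deleting the item entirely once its ref count reaches zero
 */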
static int noinline remove_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref;
	u32 num_refs;
	int ret = 0;

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
	num_refs = btrfs_ref_num_refs(leaf, ref);
	BUG_ON(num_refs == 0);
	num_refs -= 1;
	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		btrfs_set_ref_num_refs(leaf, ref, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_release_path(root, path);
	return ret;
}

#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
#else
	blkdev_issue_discard(bdev, start >> 9, len >> 9);
#endif
}
#endif

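/*
 * process a list of pending extent deletions: for each op find the
 * backref and extent item, drop the refs, and when a run of adjacent
 * extent+backref pairs all hit zero refs delete them in a single
 * btrfs_del_items call, pinning the space and updating the block group
 * counters as we go
 */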
static int noinline free_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root,
				 struct list_head *del_list)
{
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	struct list_head *cur;
	struct pending_extent_op *op;
	struct btrfs_extent_item *ei;
	int ret, num_to_del, extent_slot = 0, found_extent = 0;
	u32 refs;
	u64 bytes_freed = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

search:
	/* search for the backref for the current ref we want to delete */
	cur = del_list->next;
	op = list_entry(cur, struct pending_extent_op, list);
	ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
				    op->orig_parent,
				    extent_root->root_key.objectid,
				    op->orig_generation, op->level, 1);
	if (ret) {
		printk(KERN_ERR "Unable to find backref byte nr %Lu root %Lu "
		       "gen %Lu owner %u\n", op->bytenr,
		       extent_root->root_key.objectid, op->orig_generation,
		       op->level);
		btrfs_print_leaf(extent_root, path->nodes[0]);
		WARN_ON(1);
		goto out;
	}

	extent_slot = path->slots[0];
	num_to_del = 1;
	found_extent = 0;

	/*
	 * if we aren't the first item on the leaf we can move back one and
	 * see if our ref is right next to our extent item
	 */
	if (likely(extent_slot)) {
		extent_slot--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      extent_slot);
		if (found_key.objectid == op->bytenr &&
		    found_key.type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key.offset == op->num_bytes) {
			num_to_del++;
			found_extent = 1;
		}
	}

	/*
	 * if we didn't find the extent we need to delete the backref and then
	 * search for the extent item key so we can update its ref count
	 */
	if (!found_extent) {
		key.objectid = op->bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = op->num_bytes;

		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);
		btrfs_release_path(extent_root, path);
		ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
		BUG_ON(ret);
		extent_slot = path->slots[0];
	}

	/* this is where we update the ref count for the extent */
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs == 0);
	refs--;
	btrfs_set_extent_refs(leaf, ei, refs);

	btrfs_mark_buffer_dirty(leaf);

	/*
	 * This extent needs deleting.  The reason cur_slot is extent_slot +
	 * num_to_del is because extent_slot points to the slot where the
	 * extent is, and if the backref was not right next to the extent we
	 * will be deleting at least 1 item, and will want to start searching
	 * at the slot directly next to extent_slot.  However if we did find
	 * the backref next to the extent item then we will be deleting at
	 * least 2 items and will want to start searching directly after the
	 * ref slot
	 */
	if (!refs) {
		struct list_head *pos, *n, *end;
		int cur_slot = extent_slot + num_to_del;
		u64 super_used;
		u64 root_used;

		path->slots[0] = extent_slot;
		bytes_freed = op->num_bytes;

		mutex_lock(&info->pinned_mutex);
		ret = pin_down_bytes(trans, extent_root, op->bytenr,
				     op->num_bytes, op->level >=
				     BTRFS_FIRST_FREE_OBJECTID);
		mutex_unlock(&info->pinned_mutex);
		BUG_ON(ret < 0);
		op->del = ret;

		/*
		 * we need to see if we can delete multiple things at once, so
		 * start looping through the list of extents we are wanting to
		 * delete and see if their extent/backrefs are right next to
		 * each other and the extents only have 1 ref
		 */
		for (pos = cur->next; pos != del_list; pos = pos->next) {
			struct pending_extent_op *tmp;

			tmp = list_entry(pos, struct pending_extent_op, list);

			/* we only want to delete extent+ref at this stage */
			if (cur_slot >= btrfs_header_nritems(leaf) - 1)
				break;

			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
			if (found_key.objectid != tmp->bytenr ||
			    found_key.type != BTRFS_EXTENT_ITEM_KEY ||
			    found_key.offset != tmp->num_bytes)
				break;

			/* check to make sure this extent only has one ref */
			ei = btrfs_item_ptr(leaf, cur_slot,
					    struct btrfs_extent_item);
			if (btrfs_extent_refs(leaf, ei) != 1)
				break;

			btrfs_item_key_to_cpu(leaf, &found_key, cur_slot + 1);
			if (found_key.objectid != tmp->bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY ||
			    found_key.offset != tmp->orig_parent)
				break;

			/*
			 * the ref is right next to the extent, we can set the
			 * ref count to 0 since we will delete them both now
			 */
			btrfs_set_extent_refs(leaf, ei, 0);

			/* pin down the bytes for this extent */
			mutex_lock(&info->pinned_mutex);
			ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
					     tmp->num_bytes, tmp->level >=
					     BTRFS_FIRST_FREE_OBJECTID);
			mutex_unlock(&info->pinned_mutex);
			BUG_ON(ret < 0);

			/*
			 * use the del field to tell if we need to go ahead
			 * and free up the extent when we delete the item or
			 * not.
			 */
			tmp->del = ret;
			bytes_freed += tmp->num_bytes;

			num_to_del += 2;
			cur_slot += 2;
		}
		end = pos;

		/* update the free space counters */
		spin_lock_irq(&info->delalloc_lock);
		super_used = btrfs_super_bytes_used(&info->super_copy);
		btrfs_set_super_bytes_used(&info->super_copy,
					   super_used - bytes_freed);
		spin_unlock_irq(&info->delalloc_lock);

		root_used = btrfs_root_used(&extent_root->root_item);
		btrfs_set_root_used(&extent_root->root_item,
				    root_used - bytes_freed);

		/* delete the items */
		ret = btrfs_del_items(trans, extent_root, path,
				      path->slots[0], num_to_del);
		BUG_ON(ret);

		/*
		 * loop through the extents we deleted and do the cleanup work
		 * on them
		 */
		for (pos = cur, n = pos->next; pos != end;
		     pos = n, n = pos->next) {
			struct pending_extent_op *tmp;
#ifdef BIO_RW_DISCARD
			u64 map_length;
			struct btrfs_multi_bio *multi = NULL;
#endif
			tmp = list_entry(pos, struct pending_extent_op, list);

			/*
			 * remember tmp->del tells us whether or not we pinned
			 * down the extent
			 */
			ret = update_block_group(trans, extent_root,
						 tmp->bytenr, tmp->num_bytes,
						 0, tmp->del);
			BUG_ON(ret);

#ifdef BIO_RW_DISCARD
			map_length = tmp->num_bytes;
			ret = btrfs_map_block(&info->mapping_tree, READ,
					      tmp->bytenr, &map_length,
					      &multi, 0);
			if (!ret) {
				struct btrfs_bio_stripe *stripe;
				int i;

				stripe = multi->stripes;

				if (map_length > tmp->num_bytes)
					map_length = tmp->num_bytes;

				for (i = 0; i < multi->num_stripes;
				     i++, stripe++)
					btrfs_issue_discard(stripe->dev->bdev,
							    stripe->physical,
							    map_length);
				kfree(multi);
			}
#endif
			list_del_init(&tmp->list);
			unlock_extent(&info->extent_ins, tmp->bytenr,
				      tmp->bytenr + tmp->num_bytes - 1,
				      GFP_NOFS);
			kfree(tmp);
		}
	} else if (refs && found_extent) {
		/*
		 * the ref and extent were right next to each other, but the
		 * extent still has a ref, so just free the backref and keep
		 * going
		 */
		ret = remove_extent_backref(trans, extent_root, path);
		BUG_ON(ret);

		list_del_init(&op->list);
		unlock_extent(&info->extent_ins, op->bytenr,
			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
		kfree(op);
	} else {
		/*
		 * the extent has multiple refs and the backref we were
		 * looking for was not right next to it, so just unlock and
		 * go next, we're good to go
		 */
		list_del_init(&op->list);
		unlock_extent(&info->extent_ins, op->bytenr,
			      op->bytenr + op->num_bytes - 1, GFP_NOFS);
		kfree(op);
	}

	btrfs_release_path(extent_root, path);
	if (!list_empty(del_list))
		goto search;

out:
	btrfs_free_path(path);
	return ret;
}

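/*
 * rewrite a backref so it points at a new parent block.  For the extent
 * root itself the change is queued as a PENDING_BACKREF_UPDATE in the
 * extent_ins tree (merging with an already pending op when one exists);
 * for every other root the old backref is looked up, removed and
 * reinserted with the new parent and generation right away.
 */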
static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 bytenr,
				     u64 orig_parent, u64 parent,
				     u64 orig_root, u64 ref_root,
				     u64 orig_generation, u64 ref_generation,
				     u64 owner_objectid)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;

	if (root == root->fs_info->extent_root) {
		struct pending_extent_op *extent_op;
		u64 num_bytes;

		BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
		num_bytes = btrfs_level_size(root, (int)owner_objectid);
		mutex_lock(&root->fs_info->extent_ins_mutex);
		if (test_range_bit(&root->fs_info->extent_ins, bytenr,
				bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
			u64 priv;
			ret = get_state_private(&root->fs_info->extent_ins,
						bytenr, &priv);
			BUG_ON(ret);
			extent_op = (struct pending_extent_op *)
							(unsigned long)priv;
			BUG_ON(extent_op->parent != orig_parent);
			BUG_ON(extent_op->generation != orig_generation);

			extent_op->parent = parent;
			extent_op->generation = ref_generation;
		} else {
			extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
			BUG_ON(!extent_op);

			extent_op->type = PENDING_BACKREF_UPDATE;
			extent_op->bytenr = bytenr;
			extent_op->num_bytes = num_bytes;
			extent_op->parent = parent;
			extent_op->orig_parent = orig_parent;
			extent_op->generation = ref_generation;
			extent_op->orig_generation = orig_generation;
			extent_op->level = (int)owner_objectid;
			INIT_LIST_HEAD(&extent_op->list);
			extent_op->del = 0;

			set_extent_bits(&root->fs_info->extent_ins,
					bytenr, bytenr + num_bytes - 1,
					EXTENT_WRITEBACK, GFP_NOFS);
			set_state_private(&root->fs_info->extent_ins,
					  bytenr, (unsigned long)extent_op);
		}
		mutex_unlock(&root->fs_info->extent_ins_mutex);
		return 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = lookup_extent_backref(trans, extent_root, path,
				    bytenr, orig_parent, orig_root,
				    orig_generation, owner_objectid, 1);
	if (ret)
		goto out;
	ret = remove_extent_backref(trans, extent_root, path);
	if (ret)
		goto out;
	ret = insert_extent_backref(trans, extent_root, path, bytenr,
				    parent, ref_root, ref_generation,
				    owner_objectid);
	BUG_ON(ret);
	finish_current_insert(trans, extent_root, 0);
	del_pending_extents(trans, extent_root, 0);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 orig_parent, u64 parent,
			    u64 ref_root, u64 ref_generation,
			    u64 owner_objectid)
{
	int ret;
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		return 0;
	ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
					parent, ref_root, ref_root,
					ref_generation, ref_generation,
					owner_objectid);
	return ret;
}

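/*
 * bump the ref count on the extent item for bytenr and insert a new
 * backref for (parent, ref_root, ref_generation, owner_objectid)
 */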
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, u64 bytenr,
				  u64 orig_parent, u64 parent,
				  u64 orig_root, u64 ref_root,
				  u64 orig_generation, u64 ref_generation,
				  u64 owner_objectid)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	u32 refs;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 1);
	if (ret < 0) {
		/* don't leak the path on error */
		btrfs_free_path(path);
		return ret;
	}
	BUG_ON(ret == 0 || path->slots[0] == 0);

	path->slots[0]--;
	l = path->nodes[0];

	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	if (key.objectid != bytenr) {
		btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
		printk(KERN_ERR "wanted %Lu found %Lu\n", bytenr,
		       key.objectid);
		BUG();
	}
	BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(l, item);
	btrfs_set_extent_refs(l, item, refs + 1);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent,
				    ref_root, ref_generation,
				    owner_objectid);
	BUG_ON(ret);
	finish_current_insert(trans, root->fs_info->extent_root, 0);
	del_pending_extents(trans, root->fs_info->extent_root, 0);

	btrfs_free_path(path);
	return 0;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 ref_root, u64 ref_generation,
			 u64 owner_objectid)
{
	int ret;
	if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		return 0;
	ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
				     0, ref_root, 0, ref_generation,
				     owner_objectid);
	return ret;
}

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
{
	finish_current_insert(trans, root->fs_info->extent_root, 1);
	del_pending_extents(trans, root->fs_info->extent_root, 1);
	return 0;
}

int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	WARN_ON(num_bytes < root->sectorsize);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;
	key.objectid = bytenr;
	key.offset = num_bytes;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret < 0)
		goto out;
	if (ret != 0) {
		btrfs_print_leaf(root, path->nodes[0]);
		printk(KERN_ERR "failed to find block number %Lu\n", bytenr);
		BUG();
	}
	l = path->nodes[0];
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	*refs = btrfs_extent_refs(l, item);
out:
	btrfs_free_path(path);
	/* propagate errors instead of always returning 0 */
	return ret;
}

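/*
 * check whether anyone other than this root still references the extent
 * at bytenr; returns 1 when a ref from another root (or from a
 * generation old enough to be covered by a snapshot of this root)
 * exists, 0 when we are the only holder, negative on error
 */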
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref_item;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 ref_root;
	u64 last_snapshot;
	u32 nritems;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);
	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref);
		ref_root = btrfs_ref_root(leaf, ref_item);
		if (ref_root != root->root_key.objectid &&
		    ref_root != BTRFS_TREE_LOG_OBJECTID) {
			ret = 1;
			goto out;
		}
		if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *orig_buf, struct extent_buffer *buf,
		  u32 *nr_extents)
{
	u64 bytenr;
	u64 ref_root;
	u64 orig_root;
	u64 ref_generation;
	u64 orig_generation;
	u32 nritems;
	u32 nr_file_extents = 0;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int faili = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);

	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (root->ref_cows) {
		process_func = __btrfs_inc_extent_ref;
	} else {
		if (level == 0 &&
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			goto out;
		if (level != 0 &&
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
			goto out;
		process_func = __btrfs_update_extent_ref;
	}

	for (i = 0; i < nritems; i++) {
		cond_resched();
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			nr_file_extents++;

			ret = process_func(trans, root, bytenr,
					   orig_buf->start, buf->start,
					   orig_root, ref_root,
					   orig_generation, ref_generation,
					   key.objectid);

			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			ret = process_func(trans, root, bytenr,
					   orig_buf->start, buf->start,
					   orig_root, ref_root,
					   orig_generation, ref_generation,
					   level - 1);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		}
	}
out:
	if (nr_extents) {
		if (level == 0)
			*nr_extents = nr_file_extents;
		else
			*nr_extents = nritems;
	}
	return 0;
fail:
	WARN_ON(1);
	return ret;
}

int btrfs_update_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root, struct extent_buffer *orig_buf,
		     struct extent_buffer *buf, int start_slot, int nr)
{
	u64 bytenr;
	u64 ref_root;
	u64 orig_root;
	u64 ref_generation;
	u64 orig_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int ret;
	int slot;
	int level;

	BUG_ON(start_slot < 0);
	BUG_ON(start_slot + nr > btrfs_header_nritems(buf));

	ref_root = btrfs_header_owner(buf);
	ref_generation = btrfs_header_generation(buf);
	orig_root = btrfs_header_owner(orig_buf);
	orig_generation = btrfs_header_generation(orig_buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows) {
		if (level == 0 &&
		    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			return 0;
		if (level != 0 &&
		    root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
			return 0;
	}

	for (i = 0, slot = start_slot; i < nr; i++, slot++) {
		cond_resched();
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, slot);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, slot,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;
			ret = __btrfs_update_extent_ref(trans, root, bytenr,
					    orig_buf->start, buf->start,
					    orig_root, ref_root,
					    orig_generation, ref_generation,
					    key.objectid);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, slot);
			ret = __btrfs_update_extent_ref(trans, root, bytenr,
					    orig_buf->start, buf->start,
					    orig_root, ref_root,
					    orig_generation, ref_generation,
					    level - 1);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	WARN_ON(1);
	return -1;
}

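/*
 * write one dirty block group item back into the extent tree
 */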
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	int pending_ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	finish_current_insert(trans, extent_root, 0);
	pending_ret = del_pending_extents(trans, extent_root, 0);
	if (ret)
		return ret;
	if (pending_ret)
		return pending_ret;
	return 0;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache, *entry;
	struct rb_node *n;
	int err = 0;
	int werr = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		cache = NULL;
		spin_lock(&root->fs_info->block_group_cache_lock);
		for (n = rb_first(&root->fs_info->block_group_cache_tree);
		     n; n = rb_next(n)) {
			entry = rb_entry(n, struct btrfs_block_group_cache,
					 cache_node);
			if (entry->dirty) {
				cache = entry;
				break;
			}
		}
		spin_unlock(&root->fs_info->block_group_cache_lock);

		if (!cache)
			break;

		cache->dirty = 0;
		last += cache->key.offset;

		err = write_one_cache_group(trans, root,
					    path, cache);
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
		if (err) {
			werr = err;
			continue;
		}
	}
	btrfs_free_path(path);
	return werr;
}

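/*
 * update the space_info for the given allocation flags, creating it on
 * first use; total_bytes and bytes_used are added to the existing counts
 */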
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	list_add(&found->list, &info->space_info);
	INIT_LIST_HEAD(&found->block_groups);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->full = 0;
	found->force_alloc = 0;
	*space_info = found;
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}

static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	if (!cache->ro) {
		cache->space_info->bytes_readonly += cache->key.offset -
					btrfs_block_group_used(&cache->item);
		cache->ro = 1;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
}

u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}

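/*
 * allocate a new chunk for the given allocation profile if the space info
 * is getting full (or force is set); when the underlying chunk allocation
 * fails the space info is marked full so we stop retrying
 */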
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	int ret = 0;

	mutex_lock(&extent_root->fs_info->chunk_mutex);

	flags = btrfs_reduce_alloc_profile(extent_root, flags);

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	spin_lock(&space_info->lock);
	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full) {
		spin_unlock(&space_info->lock);
		goto out;
	}

	thresh = space_info->total_bytes - space_info->bytes_readonly;
	thresh = div_factor(thresh, 6);
	if (!force &&
	    (space_info->bytes_used + space_info->bytes_pinned +
	     space_info->bytes_reserved + alloc_bytes) < thresh) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	if (ret) {
		printk("space info full %Lu\n", flags);
		space_info->full = 1;
	}
out:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
	return ret;
}

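/*
 * adjust the on-disk usage counter of every block group touched by the
 * range [bytenr, bytenr + num_bytes); alloc picks the direction and
 * mark_free returns the space to the free space cache on deallocation
 */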
1955static int update_block_group(struct btrfs_trans_handle *trans,
1956 struct btrfs_root *root,
1957 u64 bytenr, u64 num_bytes, int alloc,
1958 int mark_free)
1959{
1960 struct btrfs_block_group_cache *cache;
1961 struct btrfs_fs_info *info = root->fs_info;
1962 u64 total = num_bytes;
1963 u64 old_val;
1964 u64 byte_in_group;
1965
1966 while(total) {
1967 cache = btrfs_lookup_block_group(info, bytenr);
1968 if (!cache)
1969 return -1;
1970 byte_in_group = bytenr - cache->key.objectid;
1971 WARN_ON(byte_in_group > cache->key.offset);
1972
1973 spin_lock(&cache->space_info->lock);
1974 spin_lock(&cache->lock);
1975 cache->dirty = 1;
1976 old_val = btrfs_block_group_used(&cache->item);
1977 num_bytes = min(total, cache->key.offset - byte_in_group);
1978 if (alloc) {
1979 old_val += num_bytes;
1980 cache->space_info->bytes_used += num_bytes;
1981 if (cache->ro)
1982 cache->space_info->bytes_readonly -= num_bytes;
1983 btrfs_set_block_group_used(&cache->item, old_val);
1984 spin_unlock(&cache->lock);
1985 spin_unlock(&cache->space_info->lock);
1986 } else {
1987 old_val -= num_bytes;
1988 cache->space_info->bytes_used -= num_bytes;
1989 if (cache->ro)
1990 cache->space_info->bytes_readonly += num_bytes;
1991 btrfs_set_block_group_used(&cache->item, old_val);
1992 spin_unlock(&cache->lock);
1993 spin_unlock(&cache->space_info->lock);
1994 if (mark_free) {
1995 int ret;
1996 ret = btrfs_add_free_space(cache, bytenr,
1997 num_bytes);
1998 if (ret)
1999 return -1;
2000 }
2001 }
2002 total -= num_bytes;
2003 bytenr += num_bytes;
2004 }
2005 return 0;
2006}
2007
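/* return the start of the first block group at or after search_start */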
2008static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
2009{
2010 struct btrfs_block_group_cache *cache;
2011
2012 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
2013 if (!cache)
2014 return 0;
2015
2016 return cache->key.objectid;
2017}
2018
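/*
 * pin or unpin the range [bytenr, bytenr + num) in the pinned extent
 * tree, keeping the per block group and space_info pinned counters
 * in sync
 */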
2019int btrfs_update_pinned_extents(struct btrfs_root *root,
2020 u64 bytenr, u64 num, int pin)
2021{
2022 u64 len;
2023 struct btrfs_block_group_cache *cache;
2024 struct btrfs_fs_info *fs_info = root->fs_info;
2025
2026 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
2027 if (pin) {
2028 set_extent_dirty(&fs_info->pinned_extents,
2029 bytenr, bytenr + num - 1, GFP_NOFS);
2030 } else {
2031 clear_extent_dirty(&fs_info->pinned_extents,
2032 bytenr, bytenr + num - 1, GFP_NOFS);
2033 }
2034 while (num > 0) {
2035 cache = btrfs_lookup_block_group(fs_info, bytenr);
2036 BUG_ON(!cache);
2037 len = min(num, cache->key.offset -
2038 (bytenr - cache->key.objectid));
2039 if (pin) {
2040 spin_lock(&cache->space_info->lock);
2041 spin_lock(&cache->lock);
2042 cache->pinned += len;
2043 cache->space_info->bytes_pinned += len;
2044 spin_unlock(&cache->lock);
2045 spin_unlock(&cache->space_info->lock);
2046 fs_info->total_pinned += len;
2047 } else {
2048 spin_lock(&cache->space_info->lock);
2049 spin_lock(&cache->lock);
2050 cache->pinned -= len;
2051 cache->space_info->bytes_pinned -= len;
2052 spin_unlock(&cache->lock);
2053 spin_unlock(&cache->space_info->lock);
2054 fs_info->total_pinned -= len;
2055 if (cache->cached)
2056 btrfs_add_free_space(cache, bytenr, len);
2057 }
2058 bytenr += len;
2059 num -= len;
2060 }
2061 return 0;
2062}
2063
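/*
 * adjust the reserved byte counters of the block groups covering the
 * range [bytenr, bytenr + num)
 */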
2064static int update_reserved_extents(struct btrfs_root *root,
2065 u64 bytenr, u64 num, int reserve)
2066{
2067 u64 len;
2068 struct btrfs_block_group_cache *cache;
2069 struct btrfs_fs_info *fs_info = root->fs_info;
2070
2071 while (num > 0) {
2072 cache = btrfs_lookup_block_group(fs_info, bytenr);
2073 BUG_ON(!cache);
2074 len = min(num, cache->key.offset -
2075 (bytenr - cache->key.objectid));
2076
2077 spin_lock(&cache->space_info->lock);
2078 spin_lock(&cache->lock);
2079 if (reserve) {
2080 cache->reserved += len;
2081 cache->space_info->bytes_reserved += len;
2082 } else {
2083 cache->reserved -= len;
2084 cache->space_info->bytes_reserved -= len;
2085 }
2086 spin_unlock(&cache->lock);
2087 spin_unlock(&cache->space_info->lock);
2088 bytenr += len;
2089 num -= len;
2090 }
2091 return 0;
2092}
2093
2094int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2095{
2096 u64 last = 0;
2097 u64 start;
2098 u64 end;
2099 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2100 int ret;
2101
2102 mutex_lock(&root->fs_info->pinned_mutex);
2103	while (1) {
2104 ret = find_first_extent_bit(pinned_extents, last,
2105 &start, &end, EXTENT_DIRTY);
2106 if (ret)
2107 break;
2108 set_extent_dirty(copy, start, end, GFP_NOFS);
2109 last = end + 1;
2110 }
2111 mutex_unlock(&root->fs_info->pinned_mutex);
2112 return 0;
2113}
2114
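/*
 * called during transaction commit to unpin every extent recorded in
 * the given unpin tree
 */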
2115int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2116 struct btrfs_root *root,
2117 struct extent_io_tree *unpin)
2118{
2119 u64 start;
2120 u64 end;
2121 int ret;
2122
2123 mutex_lock(&root->fs_info->pinned_mutex);
2124	while (1) {
2125 ret = find_first_extent_bit(unpin, 0, &start, &end,
2126 EXTENT_DIRTY);
2127 if (ret)
2128 break;
2129 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2130 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2131 if (need_resched()) {
2132 mutex_unlock(&root->fs_info->pinned_mutex);
2133 cond_resched();
2134 mutex_lock(&root->fs_info->pinned_mutex);
2135 }
2136 }
2137 mutex_unlock(&root->fs_info->pinned_mutex);
2138 return 0;
2139}
2140
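/*
 * process the pending extent inserts and backref updates recorded in
 * the extent_ins tree; when 'all' is set, loop until every pending
 * operation has been handled
 */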
2141static int finish_current_insert(struct btrfs_trans_handle *trans,
2142 struct btrfs_root *extent_root, int all)
2143{
2144 u64 start;
2145 u64 end;
2146 u64 priv;
2147 u64 search = 0;
2148 u64 skipped = 0;
2149 struct btrfs_fs_info *info = extent_root->fs_info;
2150 struct btrfs_path *path;
2151 struct pending_extent_op *extent_op, *tmp;
2152 struct list_head insert_list, update_list;
2153 int ret;
2154 int num_inserts = 0, max_inserts;
2155
2156 path = btrfs_alloc_path();
2157 INIT_LIST_HEAD(&insert_list);
2158 INIT_LIST_HEAD(&update_list);
2159
2160 max_inserts = extent_root->leafsize /
2161 (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
2162 sizeof(struct btrfs_extent_ref) +
2163 sizeof(struct btrfs_extent_item));
2164again:
2165 mutex_lock(&info->extent_ins_mutex);
2166 while (1) {
2167 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2168 &end, EXTENT_WRITEBACK);
2169 if (ret) {
2170 if (skipped && all && !num_inserts) {
2171 skipped = 0;
2172 search = 0;
2173 continue;
2174 }
2175 mutex_unlock(&info->extent_ins_mutex);
2176 break;
2177 }
2178
2179 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
2180 if (!ret) {
2181 skipped = 1;
2182 search = end + 1;
2183 if (need_resched()) {
2184 mutex_unlock(&info->extent_ins_mutex);
2185 cond_resched();
2186 mutex_lock(&info->extent_ins_mutex);
2187 }
2188 continue;
2189 }
2190
2191 ret = get_state_private(&info->extent_ins, start, &priv);
2192 BUG_ON(ret);
2193		extent_op = (struct pending_extent_op *)(unsigned long)priv;
2194
2195 if (extent_op->type == PENDING_EXTENT_INSERT) {
2196 num_inserts++;
2197 list_add_tail(&extent_op->list, &insert_list);
2198 search = end + 1;
2199 if (num_inserts == max_inserts) {
2200 mutex_unlock(&info->extent_ins_mutex);
2201 break;
2202 }
2203 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
2204 list_add_tail(&extent_op->list, &update_list);
2205 search = end + 1;
2206 } else {
2207 BUG();
2208 }
2209 }
2210
2211 /*
2212 * process the update list, clear the writeback bit for it, and if
2213 * somebody marked this thing for deletion then just unlock it and be
2214 * done, the free_extents will handle it
2215 */
2216 mutex_lock(&info->extent_ins_mutex);
2217 list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
2218 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2219 extent_op->bytenr + extent_op->num_bytes - 1,
2220 EXTENT_WRITEBACK, GFP_NOFS);
2221 if (extent_op->del) {
2222 list_del_init(&extent_op->list);
2223 unlock_extent(&info->extent_ins, extent_op->bytenr,
2224 extent_op->bytenr + extent_op->num_bytes
2225 - 1, GFP_NOFS);
2226 kfree(extent_op);
2227 }
2228 }
2229 mutex_unlock(&info->extent_ins_mutex);
2230
2231 /*
2232	 * still have things left on the update list, go ahead and update
2233 * everything
2234 */
2235 if (!list_empty(&update_list)) {
2236 ret = update_backrefs(trans, extent_root, path, &update_list);
2237 BUG_ON(ret);
2238 }
2239
2240 /*
2241 * if no inserts need to be done, but we skipped some extents and we
2242	 * need to make sure everything is cleaned, then reset everything and
2243 * go back to the beginning
2244 */
2245 if (!num_inserts && all && skipped) {
2246 search = 0;
2247 skipped = 0;
2248 INIT_LIST_HEAD(&update_list);
2249 INIT_LIST_HEAD(&insert_list);
2250 goto again;
2251 } else if (!num_inserts) {
2252 goto out;
2253 }
2254
2255 /*
2256 * process the insert extents list. Again if we are deleting this
2257 * extent, then just unlock it, pin down the bytes if need be, and be
2258 * done with it. Saves us from having to actually insert the extent
2259 * into the tree and then subsequently come along and delete it
2260 */
2261 mutex_lock(&info->extent_ins_mutex);
2262 list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
2263 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2264 extent_op->bytenr + extent_op->num_bytes - 1,
2265 EXTENT_WRITEBACK, GFP_NOFS);
2266 if (extent_op->del) {
2267 list_del_init(&extent_op->list);
2268 unlock_extent(&info->extent_ins, extent_op->bytenr,
2269 extent_op->bytenr + extent_op->num_bytes
2270 - 1, GFP_NOFS);
2271
2272 mutex_lock(&extent_root->fs_info->pinned_mutex);
2273 ret = pin_down_bytes(trans, extent_root,
2274 extent_op->bytenr,
2275 extent_op->num_bytes, 0);
2276 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2277
2278 ret = update_block_group(trans, extent_root,
2279 extent_op->bytenr,
2280 extent_op->num_bytes,
2281 0, ret > 0);
2282 BUG_ON(ret);
2283 kfree(extent_op);
2284 num_inserts--;
2285 }
2286 }
2287 mutex_unlock(&info->extent_ins_mutex);
2288
2289 ret = insert_extents(trans, extent_root, path, &insert_list,
2290 num_inserts);
2291 BUG_ON(ret);
2292
2293 /*
2294 * if we broke out of the loop in order to insert stuff because we hit
2295 * the maximum number of inserts at a time we can handle, then loop
2296 * back and pick up where we left off
2297 */
2298 if (num_inserts == max_inserts) {
2299 INIT_LIST_HEAD(&insert_list);
2300 INIT_LIST_HEAD(&update_list);
2301 num_inserts = 0;
2302 goto again;
2303 }
2304
2305 /*
2306 * again, if we need to make absolutely sure there are no more pending
2307 * extent operations left and we know that we skipped some, go back to
2308 * the beginning and do it all again
2309 */
2310 if (all && skipped) {
2311 INIT_LIST_HEAD(&insert_list);
2312 INIT_LIST_HEAD(&update_list);
2313 search = 0;
2314 skipped = 0;
2315 num_inserts = 0;
2316 goto again;
2317 }
2318out:
2319 btrfs_free_path(path);
2320 return 0;
2321}
2322
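/*
 * pin an extent so its bytes aren't reused before the transaction
 * commits; returns 1 instead if the extent is a clean, unwritten
 * tree block from this transaction that can be recycled right away
 */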
2323static int pin_down_bytes(struct btrfs_trans_handle *trans,
2324 struct btrfs_root *root,
2325 u64 bytenr, u64 num_bytes, int is_data)
2326{
2327 int err = 0;
2328 struct extent_buffer *buf;
2329
2330 if (is_data)
2331 goto pinit;
2332
2333 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2334 if (!buf)
2335 goto pinit;
2336
2337 /* we can reuse a block if it hasn't been written
2338 * and it is from this transaction. We can't
2339 * reuse anything from the tree log root because
2340 * it has tiny sub-transactions.
2341 */
2342 if (btrfs_buffer_uptodate(buf, 0) &&
2343 btrfs_try_tree_lock(buf)) {
2344 u64 header_owner = btrfs_header_owner(buf);
2345 u64 header_transid = btrfs_header_generation(buf);
2346 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2347 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2348 header_transid == trans->transid &&
2349 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2350 clean_tree_block(NULL, root, buf);
2351 btrfs_tree_unlock(buf);
2352 free_extent_buffer(buf);
2353 return 1;
2354 }
2355 btrfs_tree_unlock(buf);
2356 }
2357 free_extent_buffer(buf);
2358pinit:
2359 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2360
2361 BUG_ON(err < 0);
2362 return 0;
2363}
2364
2365/*
2366 * remove an extent from the root, returns 0 on success
2367 */
2368static int __free_extent(struct btrfs_trans_handle *trans,
2369 struct btrfs_root *root,
2370 u64 bytenr, u64 num_bytes, u64 parent,
2371 u64 root_objectid, u64 ref_generation,
2372 u64 owner_objectid, int pin, int mark_free)
2373{
2374 struct btrfs_path *path;
2375 struct btrfs_key key;
2376 struct btrfs_fs_info *info = root->fs_info;
2377 struct btrfs_root *extent_root = info->extent_root;
2378 struct extent_buffer *leaf;
2379 int ret;
2380 int extent_slot = 0;
2381 int found_extent = 0;
2382 int num_to_del = 1;
2383 struct btrfs_extent_item *ei;
2384 u32 refs;
2385
2386 key.objectid = bytenr;
2387 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2388 key.offset = num_bytes;
2389 path = btrfs_alloc_path();
2390 if (!path)
2391 return -ENOMEM;
2392
2393 path->reada = 1;
2394 ret = lookup_extent_backref(trans, extent_root, path,
2395 bytenr, parent, root_objectid,
2396 ref_generation, owner_objectid, 1);
2397 if (ret == 0) {
2398 struct btrfs_key found_key;
2399 extent_slot = path->slots[0];
2400		while (extent_slot > 0) {
2401 extent_slot--;
2402 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2403 extent_slot);
2404 if (found_key.objectid != bytenr)
2405 break;
2406 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2407 found_key.offset == num_bytes) {
2408 found_extent = 1;
2409 break;
2410 }
2411 if (path->slots[0] - extent_slot > 5)
2412 break;
2413 }
2414 if (!found_extent) {
2415 ret = remove_extent_backref(trans, extent_root, path);
2416 BUG_ON(ret);
2417 btrfs_release_path(extent_root, path);
2418 ret = btrfs_search_slot(trans, extent_root,
2419 &key, path, -1, 1);
2420 if (ret) {
2421 printk(KERN_ERR "umm, got %d back from search"
2422 ", was looking for %Lu\n", ret,
2423 bytenr);
2424 btrfs_print_leaf(extent_root, path->nodes[0]);
2425 }
2426 BUG_ON(ret);
2427 extent_slot = path->slots[0];
2428 }
2429 } else {
2430 btrfs_print_leaf(extent_root, path->nodes[0]);
2431 WARN_ON(1);
2432 printk("Unable to find ref byte nr %Lu root %Lu "
2433 "gen %Lu owner %Lu\n", bytenr,
2434 root_objectid, ref_generation, owner_objectid);
2435 }
2436
2437 leaf = path->nodes[0];
2438 ei = btrfs_item_ptr(leaf, extent_slot,
2439 struct btrfs_extent_item);
2440 refs = btrfs_extent_refs(leaf, ei);
2441 BUG_ON(refs == 0);
2442 refs -= 1;
2443 btrfs_set_extent_refs(leaf, ei, refs);
2444
2445 btrfs_mark_buffer_dirty(leaf);
2446
2447 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
2448 struct btrfs_extent_ref *ref;
2449 ref = btrfs_item_ptr(leaf, path->slots[0],
2450 struct btrfs_extent_ref);
2451 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
2452 /* if the back ref and the extent are next to each other
2453 * they get deleted below in one shot
2454 */
2455 path->slots[0] = extent_slot;
2456 num_to_del = 2;
2457 } else if (found_extent) {
2458 /* otherwise delete the extent back ref */
2459 ret = remove_extent_backref(trans, extent_root, path);
2460 BUG_ON(ret);
2461 /* if refs are 0, we need to setup the path for deletion */
2462 if (refs == 0) {
2463 btrfs_release_path(extent_root, path);
2464 ret = btrfs_search_slot(trans, extent_root, &key, path,
2465 -1, 1);
2466 BUG_ON(ret);
2467 }
2468 }
2469
2470 if (refs == 0) {
2471 u64 super_used;
2472 u64 root_used;
2473#ifdef BIO_RW_DISCARD
2474 u64 map_length = num_bytes;
2475 struct btrfs_multi_bio *multi = NULL;
2476#endif
2477
2478 if (pin) {
2479 mutex_lock(&root->fs_info->pinned_mutex);
2480 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
2481 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
2482 mutex_unlock(&root->fs_info->pinned_mutex);
2483 if (ret > 0)
2484 mark_free = 1;
2485 BUG_ON(ret < 0);
2486 }
2487 /* block accounting for super block */
2488 spin_lock_irq(&info->delalloc_lock);
2489 super_used = btrfs_super_bytes_used(&info->super_copy);
2490 btrfs_set_super_bytes_used(&info->super_copy,
2491 super_used - num_bytes);
2492 spin_unlock_irq(&info->delalloc_lock);
2493
2494 /* block accounting for root item */
2495 root_used = btrfs_root_used(&root->root_item);
2496 btrfs_set_root_used(&root->root_item,
2497 root_used - num_bytes);
2498 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2499 num_to_del);
2500 BUG_ON(ret);
2501 btrfs_release_path(extent_root, path);
2502 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2503 mark_free);
2504 BUG_ON(ret);
2505
2506 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2507 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2508 BUG_ON(ret);
2509 }
2510
2511#ifdef BIO_RW_DISCARD
2512 /* Tell the block device(s) that the sectors can be discarded */
2513 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2514 bytenr, &map_length, &multi, 0);
2515 if (!ret) {
2516 struct btrfs_bio_stripe *stripe = multi->stripes;
2517 int i;
2518
2519 if (map_length > num_bytes)
2520 map_length = num_bytes;
2521
2522 for (i = 0; i < multi->num_stripes; i++, stripe++) {
2523 btrfs_issue_discard(stripe->dev->bdev,
2524 stripe->physical,
2525 map_length);
2526 }
2527 kfree(multi);
2528 }
2529#endif
2530 }
2531 btrfs_free_path(path);
2532 finish_current_insert(trans, extent_root, 0);
2533 return ret;
2534}
2535
2536/*
2537 * find all the blocks marked as pending in the radix tree and remove
2538 * them from the extent map
2539 */
2540static int del_pending_extents(struct btrfs_trans_handle *trans,
2541			       struct btrfs_root *extent_root, int all)
2542{
2543 int ret;
2544 int err = 0;
2545 u64 start;
2546 u64 end;
2547 u64 priv;
2548 u64 search = 0;
2549 int nr = 0, skipped = 0;
2550 struct extent_io_tree *pending_del;
2551 struct extent_io_tree *extent_ins;
2552 struct pending_extent_op *extent_op;
2553 struct btrfs_fs_info *info = extent_root->fs_info;
2554 struct list_head delete_list;
2555
2556 INIT_LIST_HEAD(&delete_list);
2557 extent_ins = &extent_root->fs_info->extent_ins;
2558 pending_del = &extent_root->fs_info->pending_del;
2559
2560again:
2561 mutex_lock(&info->extent_ins_mutex);
2562	while (1) {
2563 ret = find_first_extent_bit(pending_del, search, &start, &end,
2564 EXTENT_WRITEBACK);
2565 if (ret) {
2566 if (all && skipped && !nr) {
2567 search = 0;
2568 continue;
2569 }
2570 mutex_unlock(&info->extent_ins_mutex);
2571 break;
2572 }
2573
2574 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2575 if (!ret) {
2576			search = end + 1;
2577 skipped = 1;
2578
2579 if (need_resched()) {
2580 mutex_unlock(&info->extent_ins_mutex);
2581 cond_resched();
2582 mutex_lock(&info->extent_ins_mutex);
2583 }
2584
2585 continue;
2586 }
2587 BUG_ON(ret < 0);
2588
2589 ret = get_state_private(pending_del, start, &priv);
2590 BUG_ON(ret);
2591 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2592
2593 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2594 GFP_NOFS);
2595 if (!test_range_bit(extent_ins, start, end,
2596 EXTENT_WRITEBACK, 0)) {
2597 list_add_tail(&extent_op->list, &delete_list);
2598 nr++;
2599 } else {
2600 kfree(extent_op);
2601
2602 ret = get_state_private(&info->extent_ins, start,
2603 &priv);
2604 BUG_ON(ret);
2605 extent_op = (struct pending_extent_op *)
2606 (unsigned long)priv;
2607
2608 clear_extent_bits(&info->extent_ins, start, end,
2609 EXTENT_WRITEBACK, GFP_NOFS);
2610
2611 if (extent_op->type == PENDING_BACKREF_UPDATE) {
2612 list_add_tail(&extent_op->list, &delete_list);
2613 search = end + 1;
2614 nr++;
2615 continue;
2616 }
2617
2618 mutex_lock(&extent_root->fs_info->pinned_mutex);
2619 ret = pin_down_bytes(trans, extent_root, start,
2620 end + 1 - start, 0);
2621 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2622
2623 ret = update_block_group(trans, extent_root, start,
2624 end + 1 - start, 0, ret > 0);
2625
2626 unlock_extent(extent_ins, start, end, GFP_NOFS);
2627 BUG_ON(ret);
2628 kfree(extent_op);
2629 }
2630 if (ret)
2631 err = ret;
2632
2633 search = end + 1;
2634
2635 if (need_resched()) {
2636 mutex_unlock(&info->extent_ins_mutex);
2637 cond_resched();
2638 mutex_lock(&info->extent_ins_mutex);
2639 }
2640 }
2641
2642 if (nr) {
2643 ret = free_extents(trans, extent_root, &delete_list);
2644 BUG_ON(ret);
2645 }
2646
2647 if (all && skipped) {
2648 INIT_LIST_HEAD(&delete_list);
2649 search = 0;
2650 nr = 0;
2651 goto again;
2652 }
2653
2654 return err;
2655}
2656
2657/*
2658 * remove an extent from the root, returns 0 on success
2659 */
2660static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2661 struct btrfs_root *root,
2662 u64 bytenr, u64 num_bytes, u64 parent,
2663 u64 root_objectid, u64 ref_generation,
2664 u64 owner_objectid, int pin)
2665{
2666 struct btrfs_root *extent_root = root->fs_info->extent_root;
2667 int pending_ret;
2668 int ret;
2669
2670 WARN_ON(num_bytes < root->sectorsize);
2671 if (root == extent_root) {
2672 struct pending_extent_op *extent_op = NULL;
2673
2674 mutex_lock(&root->fs_info->extent_ins_mutex);
2675 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
2676 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
2677 u64 priv;
2678 ret = get_state_private(&root->fs_info->extent_ins,
2679 bytenr, &priv);
2680 BUG_ON(ret);
2681 extent_op = (struct pending_extent_op *)
2682 (unsigned long)priv;
2683
2684 extent_op->del = 1;
2685 if (extent_op->type == PENDING_EXTENT_INSERT) {
2686 mutex_unlock(&root->fs_info->extent_ins_mutex);
2687 return 0;
2688 }
2689 }
2690
2691 if (extent_op) {
2692 ref_generation = extent_op->orig_generation;
2693 parent = extent_op->orig_parent;
2694 }
2695
2696 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2697 BUG_ON(!extent_op);
2698
2699 extent_op->type = PENDING_EXTENT_DELETE;
2700 extent_op->bytenr = bytenr;
2701 extent_op->num_bytes = num_bytes;
2702 extent_op->parent = parent;
2703 extent_op->orig_parent = parent;
2704 extent_op->generation = ref_generation;
2705 extent_op->orig_generation = ref_generation;
2706 extent_op->level = (int)owner_objectid;
2707 INIT_LIST_HEAD(&extent_op->list);
2708 extent_op->del = 0;
2709
2710 set_extent_bits(&root->fs_info->pending_del,
2711 bytenr, bytenr + num_bytes - 1,
2712 EXTENT_WRITEBACK, GFP_NOFS);
2713 set_state_private(&root->fs_info->pending_del,
2714 bytenr, (unsigned long)extent_op);
2715 mutex_unlock(&root->fs_info->extent_ins_mutex);
2716 return 0;
2717 }
2718	/* if this is metadata, always pin it */
2719 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2720 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2721 struct btrfs_block_group_cache *cache;
2722
2723 /* btrfs_free_reserved_extent */
2724 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
2725 BUG_ON(!cache);
2726 btrfs_add_free_space(cache, bytenr, num_bytes);
2727 update_reserved_extents(root, bytenr, num_bytes, 0);
2728 return 0;
2729 }
2730 pin = 1;
2731 }
2732
2733	/* for data, pin the extent if an earlier transaction has committed it */
2734 if (ref_generation != trans->transid)
2735 pin = 1;
2736
2737 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2738 root_objectid, ref_generation,
2739 owner_objectid, pin, pin == 0);
2740
2741 finish_current_insert(trans, root->fs_info->extent_root, 0);
2742 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2743 return ret ? ret : pending_ret;
2744}
2745
2746int btrfs_free_extent(struct btrfs_trans_handle *trans,
2747 struct btrfs_root *root,
2748 u64 bytenr, u64 num_bytes, u64 parent,
2749 u64 root_objectid, u64 ref_generation,
2750 u64 owner_objectid, int pin)
2751{
2752 int ret;
2753
2754 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2755 root_objectid, ref_generation,
2756 owner_objectid, pin);
2757 return ret;
2758}
2759
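/* round val up to a stripesize boundary */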
2760static u64 stripe_align(struct btrfs_root *root, u64 val)
2761{
2762 u64 mask = ((u64)root->stripesize - 1);
2763 u64 ret = (val + mask) & ~mask;
2764 return ret;
2765}
2766
2767/*
2768 * walks the btree of allocated extents and finds a hole of a given size.
2769 * The key ins is changed to record the hole:
2770 * ins->objectid == block start
2771 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2772 * ins->offset == number of blocks
2773 * Any available blocks before search_start are skipped.
2774 */
2775static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2776 struct btrfs_root *orig_root,
2777 u64 num_bytes, u64 empty_size,
2778 u64 search_start, u64 search_end,
2779 u64 hint_byte, struct btrfs_key *ins,
2780 u64 exclude_start, u64 exclude_nr,
2781 int data)
2782{
2783 int ret = 0;
2784	struct btrfs_root *root = orig_root->fs_info->extent_root;
2785 u64 total_needed = num_bytes;
2786 u64 *last_ptr = NULL;
2787 u64 last_wanted = 0;
2788 struct btrfs_block_group_cache *block_group = NULL;
2789 int chunk_alloc_done = 0;
2790 int empty_cluster = 2 * 1024 * 1024;
2791 int allowed_chunk_alloc = 0;
2792 struct list_head *head = NULL, *cur = NULL;
2793 int loop = 0;
2794 int extra_loop = 0;
2795 struct btrfs_space_info *space_info;
2796
2797 WARN_ON(num_bytes < root->sectorsize);
2798 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2799 ins->objectid = 0;
2800 ins->offset = 0;
2801
2802 if (orig_root->ref_cows || empty_size)
2803 allowed_chunk_alloc = 1;
2804
2805 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2806 last_ptr = &root->fs_info->last_alloc;
2807 empty_cluster = 64 * 1024;
2808 }
2809
2810 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2811 last_ptr = &root->fs_info->last_data_alloc;
2812
2813 if (last_ptr) {
2814 if (*last_ptr) {
2815 hint_byte = *last_ptr;
2816 last_wanted = *last_ptr;
2817 } else
2818 empty_size += empty_cluster;
2819 } else {
2820 empty_cluster = 0;
2821 }
2822 search_start = max(search_start, first_logical_byte(root, 0));
2823 search_start = max(search_start, hint_byte);
2824
2825 if (last_wanted && search_start != last_wanted) {
2826 last_wanted = 0;
2827 empty_size += empty_cluster;
2828 }
2829
2830 total_needed += empty_size;
2831 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2832 if (!block_group)
2833 block_group = btrfs_lookup_first_block_group(root->fs_info,
2834 search_start);
2835 space_info = __find_space_info(root->fs_info, data);
2836
2837 down_read(&space_info->groups_sem);
2838 while (1) {
2839 struct btrfs_free_space *free_space;
2840 /*
2841		 * the only way this happens is if our hint points to a block
2842		 * group that's not of the proper type; while looping this
2843		 * should never happen
2844 */
2845 if (empty_size)
2846 extra_loop = 1;
2847
2848 if (!block_group)
2849 goto new_group_no_lock;
2850
2851 if (unlikely(!block_group->cached)) {
2852 mutex_lock(&block_group->cache_mutex);
2853 ret = cache_block_group(root, block_group);
2854 mutex_unlock(&block_group->cache_mutex);
2855 if (ret)
2856 break;
2857 }
2858
2859 mutex_lock(&block_group->alloc_mutex);
2860 if (unlikely(!block_group_bits(block_group, data)))
2861 goto new_group;
2862
2863 if (unlikely(block_group->ro))
2864 goto new_group;
2865
2866 free_space = btrfs_find_free_space(block_group, search_start,
2867 total_needed);
2868 if (free_space) {
2869 u64 start = block_group->key.objectid;
2870 u64 end = block_group->key.objectid +
2871 block_group->key.offset;
2872
2873 search_start = stripe_align(root, free_space->offset);
2874
2875 /* move on to the next group */
2876 if (search_start + num_bytes >= search_end)
2877 goto new_group;
2878
2879 /* move on to the next group */
2880 if (search_start + num_bytes > end)
2881 goto new_group;
2882
2883 if (last_wanted && search_start != last_wanted) {
2884 total_needed += empty_cluster;
2885 empty_size += empty_cluster;
2886 last_wanted = 0;
2887 /*
2888 * if search_start is still in this block group
2889 * then we just re-search this block group
2890 */
2891 if (search_start >= start &&
2892 search_start < end) {
2893 mutex_unlock(&block_group->alloc_mutex);
2894 continue;
2895 }
2896
2897 /* else we go to the next block group */
2898 goto new_group;
2899 }
2900
2901 if (exclude_nr > 0 &&
2902 (search_start + num_bytes > exclude_start &&
2903 search_start < exclude_start + exclude_nr)) {
2904 search_start = exclude_start + exclude_nr;
2905 /*
2906 * if search_start is still in this block group
2907 * then we just re-search this block group
2908 */
2909 if (search_start >= start &&
2910 search_start < end) {
2911 mutex_unlock(&block_group->alloc_mutex);
2912 last_wanted = 0;
2913 continue;
2914 }
2915
2916 /* else we go to the next block group */
2917 goto new_group;
2918 }
2919
2920 ins->objectid = search_start;
2921 ins->offset = num_bytes;
2922
2923 btrfs_remove_free_space_lock(block_group, search_start,
2924 num_bytes);
2925 /* we are all good, lets return */
2926 mutex_unlock(&block_group->alloc_mutex);
2927 break;
2928 }
2929new_group:
2930 mutex_unlock(&block_group->alloc_mutex);
2931new_group_no_lock:
2932 /* don't try to compare new allocations against the
2933 * last allocation any more
2934 */
2935 last_wanted = 0;
2936
2937 /*
2938 * Here's how this works.
2939 * loop == 0: we were searching a block group via a hint
2940 * and didn't find anything, so we start at
2941 * the head of the block groups and keep searching
2942 * loop == 1: we're searching through all of the block groups
2943 * if we hit the head again we have searched
2944 * all of the block groups for this space and we
2945		 *		need to try and allocate; if we can't, error out.
2946 * loop == 2: we allocated more space and are looping through
2947 * all of the block groups again.
2948 */
2949 if (loop == 0) {
2950 head = &space_info->block_groups;
2951 cur = head->next;
2952 loop++;
2953 } else if (loop == 1 && cur == head) {
2954 int keep_going;
2955
2956 /* at this point we give up on the empty_size
2957 * allocations and just try to allocate the min
2958 * space.
2959 *
2960 * The extra_loop field was set if an empty_size
2961			 * allocation was attempted above, and if it was
2962			 * we need to try the loop again without
2963 * the additional empty_size.
2964 */
2965 total_needed -= empty_size;
2966 empty_size = 0;
2967 keep_going = extra_loop;
2968 loop++;
2969
2970 if (allowed_chunk_alloc && !chunk_alloc_done) {
2971 up_read(&space_info->groups_sem);
2972 ret = do_chunk_alloc(trans, root, num_bytes +
2973 2 * 1024 * 1024, data, 1);
2974 down_read(&space_info->groups_sem);
2975 if (ret < 0)
2976 goto loop_check;
2977 head = &space_info->block_groups;
2978 /*
2979 * we've allocated a new chunk, keep
2980 * trying
2981 */
2982 keep_going = 1;
2983 chunk_alloc_done = 1;
2984 } else if (!allowed_chunk_alloc) {
2985 space_info->force_alloc = 1;
2986 }
2987loop_check:
2988 if (keep_going) {
2989 cur = head->next;
2990 extra_loop = 0;
2991 } else {
2992 break;
2993 }
2994 } else if (cur == head) {
2995 break;
2996 }
2997
2998 block_group = list_entry(cur, struct btrfs_block_group_cache,
2999 list);
3000 search_start = block_group->key.objectid;
3001 cur = cur->next;
3002 }
3003
3004 /* we found what we needed */
3005 if (ins->objectid) {
3006 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3007 trans->block_group = block_group;
3008
3009 if (last_ptr)
3010 *last_ptr = ins->objectid + ins->offset;
3011 ret = 0;
3012 } else if (!ret) {
3013 printk(KERN_ERR "we were searching for %Lu bytes, num_bytes %Lu,"
3014 " loop %d, allowed_alloc %d\n", total_needed, num_bytes,
3015 loop, allowed_chunk_alloc);
3016 ret = -ENOSPC;
3017 }
3018
3019 up_read(&space_info->groups_sem);
3020 return ret;
3021}
3022
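/*
 * debugging helper: print the free space totals for a space_info and
 * each of its block groups
 */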
3023static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3024{
3025 struct btrfs_block_group_cache *cache;
3026 struct list_head *l;
3027
3028 printk(KERN_INFO "space_info has %Lu free, is %sfull\n",
3029 info->total_bytes - info->bytes_used - info->bytes_pinned -
3030 info->bytes_reserved, (info->full) ? "" : "not ");
3031
3032 down_read(&info->groups_sem);
3033 list_for_each(l, &info->block_groups) {
3034 cache = list_entry(l, struct btrfs_block_group_cache, list);
3035 spin_lock(&cache->lock);
3036 printk(KERN_INFO "block group %Lu has %Lu bytes, %Lu used "
3037 "%Lu pinned %Lu reserved\n",
3038 cache->key.objectid, cache->key.offset,
3039 btrfs_block_group_used(&cache->item),
3040 cache->pinned, cache->reserved);
3041 btrfs_dump_free_space(cache, bytes);
3042 spin_unlock(&cache->lock);
3043 }
3044 up_read(&info->groups_sem);
3045}
3046
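/*
 * reserve an extent of num_bytes, allocating new chunks as needed and
 * retrying with smaller sizes down to min_alloc_size on -ENOSPC
 */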
3047static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3048 struct btrfs_root *root,
3049 u64 num_bytes, u64 min_alloc_size,
3050 u64 empty_size, u64 hint_byte,
3051 u64 search_end, struct btrfs_key *ins,
3052 u64 data)
3053{
3054 int ret;
3055 u64 search_start = 0;
3056 u64 alloc_profile;
3057 struct btrfs_fs_info *info = root->fs_info;
3058
3059 if (data) {
3060 alloc_profile = info->avail_data_alloc_bits &
3061 info->data_alloc_profile;
3062 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3063 } else if (root == root->fs_info->chunk_root) {
3064 alloc_profile = info->avail_system_alloc_bits &
3065 info->system_alloc_profile;
3066 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3067 } else {
3068 alloc_profile = info->avail_metadata_alloc_bits &
3069 info->metadata_alloc_profile;
3070 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3071 }
3072again:
3073 data = btrfs_reduce_alloc_profile(root, data);
3074 /*
3075 * the only place that sets empty_size is btrfs_realloc_node, which
3076 * is not called recursively on allocations
3077 */
3078 if (empty_size || root->ref_cows) {
3079 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3080 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3081 2 * 1024 * 1024,
3082 BTRFS_BLOCK_GROUP_METADATA |
3083 (info->metadata_alloc_profile &
3084 info->avail_metadata_alloc_bits), 0);
3085 }
3086 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3087 num_bytes + 2 * 1024 * 1024, data, 0);
3088 }
3089
3090 WARN_ON(num_bytes < root->sectorsize);
3091 ret = find_free_extent(trans, root, num_bytes, empty_size,
3092 search_start, search_end, hint_byte, ins,
3093 trans->alloc_exclude_start,
3094 trans->alloc_exclude_nr, data);
3095
3096 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3097 num_bytes = num_bytes >> 1;
3098 num_bytes = num_bytes & ~(root->sectorsize - 1);
3099 num_bytes = max(num_bytes, min_alloc_size);
3100 do_chunk_alloc(trans, root->fs_info->extent_root,
3101 num_bytes, data, 1);
3102 goto again;
3103 }
3104 if (ret) {
3105 struct btrfs_space_info *sinfo;
3106
3107 sinfo = __find_space_info(root->fs_info, data);
3108 printk("allocation failed flags %Lu, wanted %Lu\n",
3109 data, num_bytes);
3110 dump_space_info(sinfo, num_bytes);
3111 BUG();
3112 }
3113
3114 return ret;
3115}
3116
3117int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3118{
3119 struct btrfs_block_group_cache *cache;
3120
3121 cache = btrfs_lookup_block_group(root->fs_info, start);
3122 if (!cache) {
3123 printk(KERN_ERR "Unable to find block group for %Lu\n", start);
3124 return -ENOSPC;
3125 }
3126 btrfs_add_free_space(cache, start, len);
3127 update_reserved_extents(root, start, len, 0);
3128 return 0;
3129}
3130
3131int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3132 struct btrfs_root *root,
3133 u64 num_bytes, u64 min_alloc_size,
3134 u64 empty_size, u64 hint_byte,
3135 u64 search_end, struct btrfs_key *ins,
3136 u64 data)
3137{
3138 int ret;
3139 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3140 empty_size, hint_byte, search_end, ins,
3141 data);
3142 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3143 return ret;
3144}
3145
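/*
 * insert the extent item and the initial backref for a reserved
 * extent, and update the super block and root item byte counts.
 * inserts into the extent root itself are deferred via the
 * extent_ins tree
 */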
3146static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3147 struct btrfs_root *root, u64 parent,
3148 u64 root_objectid, u64 ref_generation,
3149 u64 owner, struct btrfs_key *ins)
3150{
3151 int ret;
3152 int pending_ret;
3153 u64 super_used;
3154 u64 root_used;
3155 u64 num_bytes = ins->offset;
3156 u32 sizes[2];
3157 struct btrfs_fs_info *info = root->fs_info;
3158 struct btrfs_root *extent_root = info->extent_root;
3159 struct btrfs_extent_item *extent_item;
3160 struct btrfs_extent_ref *ref;
3161 struct btrfs_path *path;
3162 struct btrfs_key keys[2];
3163
3164 if (parent == 0)
3165 parent = ins->objectid;
3166
3167 /* block accounting for super block */
3168 spin_lock_irq(&info->delalloc_lock);
3169 super_used = btrfs_super_bytes_used(&info->super_copy);
3170 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
3171 spin_unlock_irq(&info->delalloc_lock);
3172
3173 /* block accounting for root item */
3174 root_used = btrfs_root_used(&root->root_item);
3175 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3176
3177 if (root == extent_root) {
3178 struct pending_extent_op *extent_op;
3179
3180 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3181 BUG_ON(!extent_op);
3182
3183 extent_op->type = PENDING_EXTENT_INSERT;
3184 extent_op->bytenr = ins->objectid;
3185 extent_op->num_bytes = ins->offset;
3186 extent_op->parent = parent;
3187 extent_op->orig_parent = 0;
3188 extent_op->generation = ref_generation;
3189 extent_op->orig_generation = 0;
3190 extent_op->level = (int)owner;
3191 INIT_LIST_HEAD(&extent_op->list);
3192 extent_op->del = 0;
3193
3194 mutex_lock(&root->fs_info->extent_ins_mutex);
3195 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
3196 ins->objectid + ins->offset - 1,
3197 EXTENT_WRITEBACK, GFP_NOFS);
3198 set_state_private(&root->fs_info->extent_ins,
3199 ins->objectid, (unsigned long)extent_op);
3200 mutex_unlock(&root->fs_info->extent_ins_mutex);
3201 goto update_block;
3202 }
3203
3204 memcpy(&keys[0], ins, sizeof(*ins));
3205 keys[1].objectid = ins->objectid;
3206 keys[1].type = BTRFS_EXTENT_REF_KEY;
3207 keys[1].offset = parent;
3208 sizes[0] = sizeof(*extent_item);
3209 sizes[1] = sizeof(*ref);
3210
3211 path = btrfs_alloc_path();
3212 BUG_ON(!path);
3213
3214 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3215 sizes, 2);
3216 BUG_ON(ret);
3217
3218 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3219 struct btrfs_extent_item);
3220 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
3221 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3222 struct btrfs_extent_ref);
3223
3224 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3225 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3226 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3227 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
3228
3229 btrfs_mark_buffer_dirty(path->nodes[0]);
3230
3231 trans->alloc_exclude_start = 0;
3232 trans->alloc_exclude_nr = 0;
3233 btrfs_free_path(path);
3234 finish_current_insert(trans, extent_root, 0);
3235 pending_ret = del_pending_extents(trans, extent_root, 0);
3236
3237 if (ret)
3238 goto out;
3239 if (pending_ret) {
3240 ret = pending_ret;
3241 goto out;
3242 }
3243
3244update_block:
3245 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
3246 if (ret) {
3247 printk("update block group failed for %Lu %Lu\n",
3248 ins->objectid, ins->offset);
3249 BUG();
3250 }
3251out:
3252 return ret;
3253}
3254
3255int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3256 struct btrfs_root *root, u64 parent,
3257 u64 root_objectid, u64 ref_generation,
3258 u64 owner, struct btrfs_key *ins)
3259{
3260 int ret;
3261
3262 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3263 return 0;
3264 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3265 ref_generation, owner, ins);
3266 update_reserved_extents(root, ins->objectid, ins->offset, 0);
3267 return ret;
3268}
3269
3270/*
3271 * this is used by the tree logging recovery code. It records that
3272 * an extent has been allocated and makes sure to clear the free
3273 * space cache bits as well
3274 */
3275int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3276 struct btrfs_root *root, u64 parent,
3277 u64 root_objectid, u64 ref_generation,
3278 u64 owner, struct btrfs_key *ins)
3279{
3280 int ret;
3281 struct btrfs_block_group_cache *block_group;
3282
3283 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3284 mutex_lock(&block_group->cache_mutex);
3285 cache_block_group(root, block_group);
3286 mutex_unlock(&block_group->cache_mutex);
3287
3288 ret = btrfs_remove_free_space(block_group, ins->objectid,
3289 ins->offset);
3290 BUG_ON(ret);
3291 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3292 ref_generation, owner, ins);
3293 return ret;
3294}
3295
3296/*
3297 * finds a free extent and does all the dirty work required for allocation
3298 * returns the key for the extent through ins, and a tree buffer for
3299 * the first block of the extent through buf.
3300 *
3301 * returns 0 if everything worked, non-zero otherwise.
3302 */
3303int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3304 struct btrfs_root *root,
3305 u64 num_bytes, u64 parent, u64 min_alloc_size,
3306 u64 root_objectid, u64 ref_generation,
3307 u64 owner_objectid, u64 empty_size, u64 hint_byte,
3308 u64 search_end, struct btrfs_key *ins, u64 data)
3309{
3310 int ret;
3311
3312 ret = __btrfs_reserve_extent(trans, root, num_bytes,
3313 min_alloc_size, empty_size, hint_byte,
3314 search_end, ins, data);
3315 BUG_ON(ret);
3316 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3317 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
3318 root_objectid, ref_generation,
3319 owner_objectid, ins);
3320 BUG_ON(ret);
3321
3322 } else {
3323 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3324 }
3325 return ret;
3326}
3327
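/*
 * set up a freshly allocated tree block: stamp the generation, mark
 * it uptodate and track it as dirty in the proper extent_io tree
 */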
3328struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3329 struct btrfs_root *root,
3330 u64 bytenr, u32 blocksize)
3331{
3332 struct extent_buffer *buf;
3333
3334 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3335 if (!buf)
3336 return ERR_PTR(-ENOMEM);
3337 btrfs_set_header_generation(buf, trans->transid);
3338 btrfs_tree_lock(buf);
3339 clean_tree_block(trans, root, buf);
3340 btrfs_set_buffer_uptodate(buf);
3341 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3342 set_extent_dirty(&root->dirty_log_pages, buf->start,
3343 buf->start + buf->len - 1, GFP_NOFS);
3344 } else {
3345 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3346 buf->start + buf->len - 1, GFP_NOFS);
3347 }
3348 trans->blocks_used++;
3349 return buf;
3350}
3351
3352/*
3353 * helper function to allocate a block for a given tree
3354 * returns the tree buffer or NULL.
3355 */
3356struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3357 struct btrfs_root *root,
3358 u32 blocksize, u64 parent,
3359 u64 root_objectid,
3360 u64 ref_generation,
3361 int level,
3362 u64 hint,
3363 u64 empty_size)
3364{
3365 struct btrfs_key ins;
3366 int ret;
3367 struct extent_buffer *buf;
3368
3369 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3370 root_objectid, ref_generation, level,
3371 empty_size, hint, (u64)-1, &ins, 0);
3372 if (ret) {
3373 BUG_ON(ret > 0);
3374 return ERR_PTR(ret);
3375 }
3376
3377 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
3378 return buf;
3379}
3380
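/*
 * drop the reference this leaf holds on each of the non-inline file
 * extents it points to
 */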
3381int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3382 struct btrfs_root *root, struct extent_buffer *leaf)
3383{
3384 u64 leaf_owner;
3385 u64 leaf_generation;
3386 struct btrfs_key key;
3387 struct btrfs_file_extent_item *fi;
3388 int i;
3389 int nritems;
3390 int ret;
3391
3392 BUG_ON(!btrfs_is_leaf(leaf));
3393 nritems = btrfs_header_nritems(leaf);
3394 leaf_owner = btrfs_header_owner(leaf);
3395 leaf_generation = btrfs_header_generation(leaf);
3396
3397 for (i = 0; i < nritems; i++) {
3398 u64 disk_bytenr;
3399 cond_resched();
3400
3401 btrfs_item_key_to_cpu(leaf, &key, i);
3402 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3403 continue;
3404 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3405 if (btrfs_file_extent_type(leaf, fi) ==
3406 BTRFS_FILE_EXTENT_INLINE)
3407 continue;
3408 /*
3409 * FIXME make sure to insert a trans record that
3410 * repeats the snapshot del on crash
3411 */
3412 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3413 if (disk_bytenr == 0)
3414 continue;
3415
3416 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3417 btrfs_file_extent_disk_num_bytes(leaf, fi),
3418 leaf->start, leaf_owner, leaf_generation,
3419 key.objectid, 0);
3420 BUG_ON(ret);
3421
3422 atomic_inc(&root->fs_info->throttle_gen);
3423 wake_up(&root->fs_info->transaction_throttle);
3424 cond_resched();
3425 }
3426 return 0;
3427}
3428
3429static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3430 struct btrfs_root *root,
3431 struct btrfs_leaf_ref *ref)
3432{
3433 int i;
3434 int ret;
3435 struct btrfs_extent_info *info = ref->extents;
3436
3437 for (i = 0; i < ref->nritems; i++) {
3438 ret = __btrfs_free_extent(trans, root, info->bytenr,
3439 info->num_bytes, ref->bytenr,
3440 ref->owner, ref->generation,
3441 info->objectid, 0);
3442
3443 atomic_inc(&root->fs_info->throttle_gen);
3444 wake_up(&root->fs_info->transaction_throttle);
3445 cond_resched();
3446
3447 BUG_ON(ret);
3448 info++;
3449 }
3450
3451 return 0;
3452}
3453
3454static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3455				     u64 len, u32 *refs)
3456{
3457 int ret;
3458
3459 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3460 BUG_ON(ret);
3461
3462#if 0 /* some debugging code in case we see problems here */
3463 /* if the refs count is one, it won't get increased again. But
3464 * if the ref count is > 1, someone may be decreasing it at
3465 * the same time we are.
3466 */
3467 if (*refs != 1) {
3468 struct extent_buffer *eb = NULL;
3469 eb = btrfs_find_create_tree_block(root, start, len);
3470 if (eb)
3471 btrfs_tree_lock(eb);
3472
3473 mutex_lock(&root->fs_info->alloc_mutex);
3474 ret = lookup_extent_ref(NULL, root, start, len, refs);
3475 BUG_ON(ret);
3476 mutex_unlock(&root->fs_info->alloc_mutex);
3477
3478 if (eb) {
3479 btrfs_tree_unlock(eb);
3480 free_extent_buffer(eb);
3481 }
3482 if (*refs == 1) {
3483 printk("block %llu went down to one during drop_snap\n",
3484 (unsigned long long)start);
3485 }
3486
3487 }
3488#endif
3489
3490 cond_resched();
3491 return ret;
3492}
3493
3494/*
3495 * helper function for drop_snapshot, this walks down the tree dropping ref
3496 * counts as it goes.
3497 */
3498static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3499 struct btrfs_root *root,
3500 struct btrfs_path *path, int *level)
3501{
3502 u64 root_owner;
3503 u64 root_gen;
3504 u64 bytenr;
3505 u64 ptr_gen;
3506 struct extent_buffer *next;
3507 struct extent_buffer *cur;
3508 struct extent_buffer *parent;
3509 struct btrfs_leaf_ref *ref;
3510 u32 blocksize;
3511 int ret;
3512 u32 refs;
3513
3514 WARN_ON(*level < 0);
3515 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3516 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
3517 path->nodes[*level]->len, &refs);
3518 BUG_ON(ret);
3519 if (refs > 1)
3520 goto out;
3521
3522 /*
3523 * walk down to the last node level and free all the leaves
3524 */
3525	while (*level >= 0) {
3526 WARN_ON(*level < 0);
3527 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3528 cur = path->nodes[*level];
3529
3530 if (btrfs_header_level(cur) != *level)
3531 WARN_ON(1);
3532
3533 if (path->slots[*level] >=
3534 btrfs_header_nritems(cur))
3535 break;
3536 if (*level == 0) {
3537 ret = btrfs_drop_leaf_ref(trans, root, cur);
3538 BUG_ON(ret);
3539 break;
3540 }
3541 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3542 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3543 blocksize = btrfs_level_size(root, *level - 1);
3544
3545 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3546 BUG_ON(ret);
3547 if (refs != 1) {
3548 parent = path->nodes[*level];
3549 root_owner = btrfs_header_owner(parent);
3550 root_gen = btrfs_header_generation(parent);
3551 path->slots[*level]++;
3552
3553 ret = __btrfs_free_extent(trans, root, bytenr,
3554 blocksize, parent->start,
3555 root_owner, root_gen,
3556 *level - 1, 1);
3557 BUG_ON(ret);
3558
3559 atomic_inc(&root->fs_info->throttle_gen);
3560 wake_up(&root->fs_info->transaction_throttle);
3561 cond_resched();
3562
3563 continue;
3564 }
3565 /*
3566 * at this point, we have a single ref, and since the
3567 * only place referencing this extent is a dead root
3568 * the reference count should never go higher.
3569 * So, we don't need to check it again
3570 */
3571 if (*level == 1) {
3572 ref = btrfs_lookup_leaf_ref(root, bytenr);
3573 if (ref && ref->generation != ptr_gen) {
3574 btrfs_free_leaf_ref(root, ref);
3575 ref = NULL;
3576 }
3577 if (ref) {
3578 ret = cache_drop_leaf_ref(trans, root, ref);
3579 BUG_ON(ret);
3580 btrfs_remove_leaf_ref(root, ref);
3581 btrfs_free_leaf_ref(root, ref);
3582 *level = 0;
3583 break;
3584 }
3585 if (printk_ratelimit()) {
3586 printk("leaf ref miss for bytenr %llu\n",
3587 (unsigned long long)bytenr);
3588 }
3589 }
3590 next = btrfs_find_tree_block(root, bytenr, blocksize);
3591 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
3592 free_extent_buffer(next);
3593
3594 next = read_tree_block(root, bytenr, blocksize,
3595 ptr_gen);
3596 cond_resched();
3597#if 0
3598 /*
3599 * this is a debugging check and can go away
3600 * the ref should never go all the way down to 1
3601 * at this point
3602 */
3603 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
3604 &refs);
3605 BUG_ON(ret);
3606 WARN_ON(refs != 1);
3607#endif
3608 }
3609 WARN_ON(*level <= 0);
3610 if (path->nodes[*level-1])
3611 free_extent_buffer(path->nodes[*level-1]);
3612 path->nodes[*level-1] = next;
3613 *level = btrfs_header_level(next);
3614 path->slots[*level] = 0;
3615 cond_resched();
3616 }
3617out:
3618 WARN_ON(*level < 0);
3619 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3620
3621 if (path->nodes[*level] == root->node) {
3622 parent = path->nodes[*level];
3623 bytenr = path->nodes[*level]->start;
3624 } else {
3625 parent = path->nodes[*level + 1];
3626 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3627 }
3628
3629 blocksize = btrfs_level_size(root, *level);
3630 root_owner = btrfs_header_owner(parent);
3631 root_gen = btrfs_header_generation(parent);
3632
3633 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3634 parent->start, root_owner, root_gen,
3635 *level, 1);
3636 free_extent_buffer(path->nodes[*level]);
3637 path->nodes[*level] = NULL;
3638 *level += 1;
3639 BUG_ON(ret);
3640
3641 cond_resched();
3642 return 0;
3643}
3644
3645/*
3646 * helper function for drop_subtree, this function is similar to
3647 * walk_down_tree. The main difference is that it checks reference
3648 * counts while tree blocks are locked.
3649 */
3650static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3651 struct btrfs_root *root,
3652 struct btrfs_path *path, int *level)
3653{
3654 struct extent_buffer *next;
3655 struct extent_buffer *cur;
3656 struct extent_buffer *parent;
3657 u64 bytenr;
3658 u64 ptr_gen;
3659 u32 blocksize;
3660 u32 refs;
3661 int ret;
3662
3663 cur = path->nodes[*level];
3664 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3665 &refs);
3666 BUG_ON(ret);
3667 if (refs > 1)
3668 goto out;
3669
3670 while (*level >= 0) {
3671 cur = path->nodes[*level];
3672 if (*level == 0) {
3673 ret = btrfs_drop_leaf_ref(trans, root, cur);
3674 BUG_ON(ret);
3675 clean_tree_block(trans, root, cur);
3676 break;
3677 }
3678 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3679 clean_tree_block(trans, root, cur);
3680 break;
3681 }
3682
3683 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3684 blocksize = btrfs_level_size(root, *level - 1);
3685 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3686
3687 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3688 btrfs_tree_lock(next);
3689
3690 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3691 &refs);
3692 BUG_ON(ret);
3693 if (refs > 1) {
3694 parent = path->nodes[*level];
3695 ret = btrfs_free_extent(trans, root, bytenr,
3696 blocksize, parent->start,
3697 btrfs_header_owner(parent),
3698 btrfs_header_generation(parent),
3699 *level - 1, 1);
3700 BUG_ON(ret);
3701 path->slots[*level]++;
3702 btrfs_tree_unlock(next);
3703 free_extent_buffer(next);
3704 continue;
3705 }
3706
3707 *level = btrfs_header_level(next);
3708 path->nodes[*level] = next;
3709 path->slots[*level] = 0;
3710 path->locks[*level] = 1;
3711 cond_resched();
3712 }
3713out:
3714 parent = path->nodes[*level + 1];
3715 bytenr = path->nodes[*level]->start;
3716 blocksize = path->nodes[*level]->len;
3717
3718 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
3719 parent->start, btrfs_header_owner(parent),
3720 btrfs_header_generation(parent), *level, 1);
3721 BUG_ON(ret);
3722
3723 if (path->locks[*level]) {
3724 btrfs_tree_unlock(path->nodes[*level]);
3725 path->locks[*level] = 0;
3726 }
3727 free_extent_buffer(path->nodes[*level]);
3728 path->nodes[*level] = NULL;
3729 *level += 1;
3730 cond_resched();
3731 return 0;
3732}
3733
3734/*
3735 * helper for dropping snapshots. This walks back up the tree in the path
3736 * to find the first node higher up where we haven't yet gone through
3737 * all the slots
3738 */
3739static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
3740 struct btrfs_root *root,
3741 struct btrfs_path *path,
3742 int *level, int max_level)
3743{
3744 u64 root_owner;
3745 u64 root_gen;
3746 struct btrfs_root_item *root_item = &root->root_item;
3747 int i;
3748 int slot;
3749 int ret;
3750
3751 for (i = *level; i < max_level && path->nodes[i]; i++) {
3752 slot = path->slots[i];
3753 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
3754 struct extent_buffer *node;
3755 struct btrfs_disk_key disk_key;
3756 node = path->nodes[i];
3757 path->slots[i]++;
3758 *level = i;
3759 WARN_ON(*level == 0);
3760 btrfs_node_key(node, &disk_key, path->slots[i]);
3761 memcpy(&root_item->drop_progress,
3762 &disk_key, sizeof(disk_key));
3763 root_item->drop_level = i;
3764 return 0;
3765 } else {
3766 struct extent_buffer *parent;
3767 if (path->nodes[*level] == root->node)
3768 parent = path->nodes[*level];
3769 else
3770 parent = path->nodes[*level + 1];
3771
3772 root_owner = btrfs_header_owner(parent);
3773 root_gen = btrfs_header_generation(parent);
3774
3775 clean_tree_block(trans, root, path->nodes[*level]);
3776 ret = btrfs_free_extent(trans, root,
3777 path->nodes[*level]->start,
3778 path->nodes[*level]->len,
3779 parent->start, root_owner,
3780 root_gen, *level, 1);
3781 BUG_ON(ret);
3782 if (path->locks[*level]) {
3783 btrfs_tree_unlock(path->nodes[*level]);
3784 path->locks[*level] = 0;
3785 }
3786 free_extent_buffer(path->nodes[*level]);
3787 path->nodes[*level] = NULL;
3788 *level = i + 1;
3789 }
3790 }
3791 return 1;
3792}
3793
3794/*
3795 * drop the reference count on the tree rooted at 'snap'. This traverses
3796 * the tree freeing any blocks that have a ref count of zero after being
3797 * decremented.
3798 */
3799int btrfs_drop_snapshot(struct btrfs_trans_handle *trans,
3800			struct btrfs_root *root)
3801{
3802 int ret = 0;
3803 int wret;
3804 int level;
3805 struct btrfs_path *path;
3806 int i;
3807 int orig_level;
3808 struct btrfs_root_item *root_item = &root->root_item;
3809
3810 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
3811 path = btrfs_alloc_path();
3812 BUG_ON(!path);
3813
3814 level = btrfs_header_level(root->node);
3815 orig_level = level;
3816 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
3817 path->nodes[level] = root->node;
3818 extent_buffer_get(root->node);
3819 path->slots[level] = 0;
3820 } else {
3821 struct btrfs_key key;
3822 struct btrfs_disk_key found_key;
3823 struct extent_buffer *node;
3824
3825 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
3826 level = root_item->drop_level;
3827 path->lowest_level = level;
3828 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3829 if (wret < 0) {
3830 ret = wret;
3831 goto out;
3832 }
3833 node = path->nodes[level];
3834 btrfs_node_key(node, &found_key, path->slots[level]);
3835 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
3836 sizeof(found_key)));
3837 /*
3838 * unlock our path, this is safe because only this
3839 * function is allowed to delete this snapshot
3840 */
3841 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3842 if (path->nodes[i] && path->locks[i]) {
3843 path->locks[i] = 0;
3844 btrfs_tree_unlock(path->nodes[i]);
3845 }
3846 }
3847 }
3848	while (1) {
3849 wret = walk_down_tree(trans, root, path, &level);
3850 if (wret > 0)
3851 break;
3852 if (wret < 0)
3853 ret = wret;
3854
3855 wret = walk_up_tree(trans, root, path, &level,
3856 BTRFS_MAX_LEVEL);
3857 if (wret > 0)
3858 break;
3859 if (wret < 0)
3860 ret = wret;
3861 if (trans->transaction->in_commit) {
3862 ret = -EAGAIN;
3863 break;
3864 }
3865 atomic_inc(&root->fs_info->throttle_gen);
3866 wake_up(&root->fs_info->transaction_throttle);
3867 }
3868 for (i = 0; i <= orig_level; i++) {
3869 if (path->nodes[i]) {
3870 free_extent_buffer(path->nodes[i]);
3871 path->nodes[i] = NULL;
3872 }
3873 }
3874out:
3875 btrfs_free_path(path);
3876 return ret;
3877}
3878
3879int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
3880 struct btrfs_root *root,
3881 struct extent_buffer *node,
3882 struct extent_buffer *parent)
3883{
3884 struct btrfs_path *path;
3885 int level;
3886 int parent_level;
3887 int ret = 0;
3888 int wret;
3889
3890 path = btrfs_alloc_path();
3891 BUG_ON(!path);
3892
3893 BUG_ON(!btrfs_tree_locked(parent));
3894 parent_level = btrfs_header_level(parent);
3895 extent_buffer_get(parent);
3896 path->nodes[parent_level] = parent;
3897 path->slots[parent_level] = btrfs_header_nritems(parent);
3898
3899 BUG_ON(!btrfs_tree_locked(node));
3900 level = btrfs_header_level(node);
3901 extent_buffer_get(node);
3902 path->nodes[level] = node;
3903 path->slots[level] = 0;
3904
3905 while (1) {
3906 wret = walk_down_subtree(trans, root, path, &level);
3907 if (wret < 0)
3908 ret = wret;
3909 if (wret != 0)
3910 break;
3911
3912 wret = walk_up_tree(trans, root, path, &level, parent_level);
3913 if (wret < 0)
3914 ret = wret;
3915 if (wret != 0)
3916 break;
3917 }
3918
3919 btrfs_free_path(path);
3920 return ret;
3921}
3922
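/*
 * return the index of the last page in a readahead window starting at
 * 'start', clamped to 'last'.
 */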
3923static unsigned long calc_ra(unsigned long start, unsigned long last,
3924 unsigned long nr)
3925{
3926 return min(last, start + nr - 1);
3927}
3928
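/*
 * read in and dirty the pages covering [start, start + len) of the
 * relocation inode.  Once the dirtied pages are written back through
 * the normal delalloc path, the data has been copied to its new
 * location.
 */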
3929 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
3930 u64 len)
3931{
3932 u64 page_start;
3933 u64 page_end;
3934 unsigned long first_index;
3935 unsigned long last_index;
3936 unsigned long i;
3937 struct page *page;
3938 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3939 struct file_ra_state *ra;
3940 struct btrfs_ordered_extent *ordered;
3941 unsigned int total_read = 0;
3942 unsigned int total_dirty = 0;
3943 int ret = 0;
3944
3945 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
3946
3947 mutex_lock(&inode->i_mutex);
3948 first_index = start >> PAGE_CACHE_SHIFT;
3949 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
3950
3951 /* make sure the dirty trick played by the caller works */
3952 ret = invalidate_inode_pages2_range(inode->i_mapping,
3953 first_index, last_index);
3954 if (ret)
3955 goto out_unlock;
3956
3957 file_ra_state_init(ra, inode->i_mapping);
3958
3959 for (i = first_index; i <= last_index; i++) {
3960 if (total_read % ra->ra_pages == 0) {
3961 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
3962 calc_ra(i, last_index, ra->ra_pages));
3963 }
3964 total_read++;
3965again:
3966 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode));
3968 page = grab_cache_page(inode->i_mapping, i);
3969 if (!page) {
3970 ret = -ENOMEM;
3971 goto out_unlock;
3972 }
3973 if (!PageUptodate(page)) {
3974 btrfs_readpage(NULL, page);
3975 lock_page(page);
3976 if (!PageUptodate(page)) {
3977 unlock_page(page);
3978 page_cache_release(page);
3979 ret = -EIO;
3980 goto out_unlock;
3981 }
3982 }
3983 wait_on_page_writeback(page);
3984
3985 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
3986 page_end = page_start + PAGE_CACHE_SIZE - 1;
3987 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3988
3989 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3990 if (ordered) {
3991 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3992 unlock_page(page);
3993 page_cache_release(page);
3994 btrfs_start_ordered_extent(inode, ordered, 1);
3995 btrfs_put_ordered_extent(ordered);
3996 goto again;
3997 }
3998 set_page_extent_mapped(page);
3999
4000 btrfs_set_extent_delalloc(inode, page_start, page_end);
4001 if (i == first_index)
4002 set_extent_bits(io_tree, page_start, page_end,
4003 EXTENT_BOUNDARY, GFP_NOFS);
4004
4005 set_page_dirty(page);
4006 total_dirty++;
4007
4008 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4009 unlock_page(page);
4010 page_cache_release(page);
4011 }
4012
4013out_unlock:
4014 kfree(ra);
4015 mutex_unlock(&inode->i_mutex);
4016 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
4017 return ret;
4018}
4019
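/*
 * insert a pinned extent mapping so that reads of the relocation inode
 * pull their data from the extent being relocated, then read and
 * re-dirty the covered pages so writeback copies the data to its new
 * home.
 */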
4020 static noinline int relocate_data_extent(struct inode *reloc_inode,
4021 struct btrfs_key *extent_key,
4022 u64 offset)
4023{
4024 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4025 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
4026 struct extent_map *em;
4027 u64 start = extent_key->objectid - offset;
4028 u64 end = start + extent_key->offset - 1;
4029
4030 em = alloc_extent_map(GFP_NOFS);
4031 BUG_ON(!em || IS_ERR(em));
4032
4033 em->start = start;
4034 em->len = extent_key->offset;
4035 em->block_len = extent_key->offset;
4036 em->block_start = extent_key->objectid;
4037 em->bdev = root->fs_info->fs_devices->latest_bdev;
4038 set_bit(EXTENT_FLAG_PINNED, &em->flags);
4039
4040 /* set up the extent map to cheat btrfs_readpage */
4041 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4042 while (1) {
4043 int ret;
4044 spin_lock(&em_tree->lock);
4045 ret = add_extent_mapping(em_tree, em);
4046 spin_unlock(&em_tree->lock);
4047 if (ret != -EEXIST) {
4048 free_extent_map(em);
4049 break;
4050 }
4051 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4052 }
4053 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4054
4055 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4056}
4057
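/*
 * a btrfs_ref_path describes one chain of back references leading from
 * an extent up to a tree root.  nodes[] holds the bytenr of the
 * referencing tree block at each level; node_keys[] and new_nodes[]
 * remember the relocated blocks and their keys between calls to
 * relocate_one_path.
 */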
4058struct btrfs_ref_path {
4059 u64 extent_start;
4060 u64 nodes[BTRFS_MAX_LEVEL];
4061 u64 root_objectid;
4062 u64 root_generation;
4063 u64 owner_objectid;
4064 u32 num_refs;
4065 int lowest_level;
4066 int current_level;
4067 int shared_level;
4068
4069 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4070 u64 new_nodes[BTRFS_MAX_LEVEL];
4071};
4072
4073struct disk_extent {
4074 u64 ram_bytes;
4075 u64 disk_bytenr;
4076 u64 disk_num_bytes;
4077 u64 offset;
4078 u64 num_bytes;
4079 u8 compression;
4080 u8 encryption;
4081 u16 other_encoding;
4082};
4083
4084static int is_cowonly_root(u64 root_objectid)
4085{
4086 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4087 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4088 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4089 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4090 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4091 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4092 return 1;
4093 return 0;
4094}
4095
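/*
 * find the next chain of back references for ref_path->extent_start.
 * returns 0 when a chain ending at a tree root was found, 1 when all
 * chains have been visited and a negative errno on error.
 */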
4096 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4097 struct btrfs_root *extent_root,
4098 struct btrfs_ref_path *ref_path,
4099 int first_time)
4100{
4101 struct extent_buffer *leaf;
4102 struct btrfs_path *path;
4103 struct btrfs_extent_ref *ref;
4104 struct btrfs_key key;
4105 struct btrfs_key found_key;
4106 u64 bytenr;
4107 u32 nritems;
4108 int level;
4109 int ret = 1;
4110
4111 path = btrfs_alloc_path();
4112 if (!path)
4113 return -ENOMEM;
4114
4115 if (first_time) {
4116 ref_path->lowest_level = -1;
4117 ref_path->current_level = -1;
4118 ref_path->shared_level = -1;
4119 goto walk_up;
4120 }
4121walk_down:
4122 level = ref_path->current_level - 1;
4123 while (level >= -1) {
4124 u64 parent;
4125 if (level < ref_path->lowest_level)
4126 break;
4127
4128 if (level >= 0) {
4129 bytenr = ref_path->nodes[level];
4130 } else {
4131 bytenr = ref_path->extent_start;
4132 }
4133 BUG_ON(bytenr == 0);
4134
4135 parent = ref_path->nodes[level + 1];
4136 ref_path->nodes[level + 1] = 0;
4137 ref_path->current_level = level;
4138 BUG_ON(parent == 0);
4139
4140 key.objectid = bytenr;
4141 key.offset = parent + 1;
4142 key.type = BTRFS_EXTENT_REF_KEY;
4143
4144 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4145 if (ret < 0)
4146 goto out;
4147 BUG_ON(ret == 0);
4148
4149 leaf = path->nodes[0];
4150 nritems = btrfs_header_nritems(leaf);
4151 if (path->slots[0] >= nritems) {
4152 ret = btrfs_next_leaf(extent_root, path);
4153 if (ret < 0)
4154 goto out;
4155 if (ret > 0)
4156 goto next;
4157 leaf = path->nodes[0];
4158 }
4159
4160 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4161 if (found_key.objectid == bytenr &&
4162 found_key.type == BTRFS_EXTENT_REF_KEY) {
4163 if (level < ref_path->shared_level)
4164 ref_path->shared_level = level;
4165 goto found;
4166 }
4167next:
4168 level--;
4169 btrfs_release_path(extent_root, path);
4170 cond_resched();
4171 }
4172 /* reached lowest level */
4173 ret = 1;
4174 goto out;
4175walk_up:
4176 level = ref_path->current_level;
4177 while (level < BTRFS_MAX_LEVEL - 1) {
4178 u64 ref_objectid;
4179 if (level >= 0) {
4180 bytenr = ref_path->nodes[level];
4181 } else {
4182 bytenr = ref_path->extent_start;
4183 }
4184 BUG_ON(bytenr == 0);
4185
4186 key.objectid = bytenr;
4187 key.offset = 0;
4188 key.type = BTRFS_EXTENT_REF_KEY;
4189
4190 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4191 if (ret < 0)
4192 goto out;
4193
4194 leaf = path->nodes[0];
4195 nritems = btrfs_header_nritems(leaf);
4196 if (path->slots[0] >= nritems) {
4197 ret = btrfs_next_leaf(extent_root, path);
4198 if (ret < 0)
4199 goto out;
4200 if (ret > 0) {
4201 /* the extent was freed by someone */
4202 if (ref_path->lowest_level == level)
4203 goto out;
4204 btrfs_release_path(extent_root, path);
4205 goto walk_down;
4206 }
4207 leaf = path->nodes[0];
4208 }
4209
4210 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4211 if (found_key.objectid != bytenr ||
4212 found_key.type != BTRFS_EXTENT_REF_KEY) {
4213 /* the extent was freed by someone */
4214 if (ref_path->lowest_level == level) {
4215 ret = 1;
4216 goto out;
4217 }
4218 btrfs_release_path(extent_root, path);
4219 goto walk_down;
4220 }
4221found:
4222 ref = btrfs_item_ptr(leaf, path->slots[0],
4223 struct btrfs_extent_ref);
4224 ref_objectid = btrfs_ref_objectid(leaf, ref);
4225 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4226 if (first_time) {
4227 level = (int)ref_objectid;
4228 BUG_ON(level >= BTRFS_MAX_LEVEL);
4229 ref_path->lowest_level = level;
4230 ref_path->current_level = level;
4231 ref_path->nodes[level] = bytenr;
4232 } else {
4233 WARN_ON(ref_objectid != level);
4234 }
4235 } else {
4236 WARN_ON(level != -1);
4237 }
4238 first_time = 0;
4239
4240 if (ref_path->lowest_level == level) {
4241 ref_path->owner_objectid = ref_objectid;
4242 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4243 }
4244
4245 /*
4246 * the block is a tree root or the block isn't in a
4247 * reference counted tree.
4248 */
4249 if (found_key.objectid == found_key.offset ||
4250 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4251 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4252 ref_path->root_generation =
4253 btrfs_ref_generation(leaf, ref);
4254 if (level < 0) {
4255 /* special reference from the tree log */
4256 ref_path->nodes[0] = found_key.offset;
4257 ref_path->current_level = 0;
4258 }
4259 ret = 0;
4260 goto out;
4261 }
4262
4263 level++;
4264 BUG_ON(ref_path->nodes[level] != 0);
4265 ref_path->nodes[level] = found_key.offset;
4266 ref_path->current_level = level;
4267
4268 /*
4269 * the reference was created in the running transaction,
4270 * no need to continue walking up.
4271 */
4272 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4273 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4274 ref_path->root_generation =
4275 btrfs_ref_generation(leaf, ref);
4276 ret = 0;
4277 goto out;
4278 }
4279
4280 btrfs_release_path(extent_root, path);
4281 cond_resched();
4282 }
4283 /* reached max tree level, but no tree root found. */
4284 BUG();
4285out:
4286 btrfs_free_path(path);
4287 return ret;
4288}
4289
4290static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4291 struct btrfs_root *extent_root,
4292 struct btrfs_ref_path *ref_path,
4293 u64 extent_start)
4294{
4295 memset(ref_path, 0, sizeof(*ref_path));
4296 ref_path->extent_start = extent_start;
4297
4298 return __next_ref_path(trans, extent_root, ref_path, 1);
4299}
4300
4301static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4302 struct btrfs_root *extent_root,
4303 struct btrfs_ref_path *ref_path)
4304{
4305 return __next_ref_path(trans, extent_root, ref_path, 0);
4306}
4307
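/*
 * look up where the data of 'extent_key' was copied to by reading the
 * file extents of the relocation inode.  The new locations are
 * returned via *extents and *nr_extents.  When 'no_fragment' is set,
 * return 1 if the data no longer fits in a single extent.
 */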
4308 static noinline int get_new_locations(struct inode *reloc_inode,
4309 struct btrfs_key *extent_key,
4310 u64 offset, int no_fragment,
4311 struct disk_extent **extents,
4312 int *nr_extents)
4313{
4314 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4315 struct btrfs_path *path;
4316 struct btrfs_file_extent_item *fi;
4317 struct extent_buffer *leaf;
4318 struct disk_extent *exts = *extents;
4319 struct btrfs_key found_key;
4320 u64 cur_pos;
4321 u64 last_byte;
4322 u32 nritems;
4323 int nr = 0;
4324 int max = *nr_extents;
4325 int ret;
4326
4327 WARN_ON(!no_fragment && *extents);
4328 if (!exts) {
4329 max = 1;
4330 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4331 if (!exts)
4332 return -ENOMEM;
4333 }
4334
4335 path = btrfs_alloc_path();
4336 BUG_ON(!path);
4337
4338 cur_pos = extent_key->objectid - offset;
4339 last_byte = extent_key->objectid + extent_key->offset;
4340 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4341 cur_pos, 0);
4342 if (ret < 0)
4343 goto out;
4344 if (ret > 0) {
4345 ret = -ENOENT;
4346 goto out;
4347 }
4348
4349 while (1) {
4350 leaf = path->nodes[0];
4351 nritems = btrfs_header_nritems(leaf);
4352 if (path->slots[0] >= nritems) {
4353 ret = btrfs_next_leaf(root, path);
4354 if (ret < 0)
4355 goto out;
4356 if (ret > 0)
4357 break;
4358 leaf = path->nodes[0];
4359 }
4360
4361 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4362 if (found_key.offset != cur_pos ||
4363 found_key.type != BTRFS_EXTENT_DATA_KEY ||
4364 found_key.objectid != reloc_inode->i_ino)
4365 break;
4366
4367 fi = btrfs_item_ptr(leaf, path->slots[0],
4368 struct btrfs_file_extent_item);
4369 if (btrfs_file_extent_type(leaf, fi) !=
4370 BTRFS_FILE_EXTENT_REG ||
4371 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4372 break;
4373
4374 if (nr == max) {
4375 struct disk_extent *old = exts;
4376 max *= 2;
4377 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
exts = old;
ret = -ENOMEM;
goto out;
}
4378 memcpy(exts, old, sizeof(*exts) * nr);
4379 if (old != *extents)
4380 kfree(old);
4381 }
4382
4383 exts[nr].disk_bytenr =
4384 btrfs_file_extent_disk_bytenr(leaf, fi);
4385 exts[nr].disk_num_bytes =
4386 btrfs_file_extent_disk_num_bytes(leaf, fi);
4387 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4388 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4389 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4390 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4391 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4392 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4393 fi);
4394 BUG_ON(exts[nr].offset > 0);
4395 BUG_ON(exts[nr].compression || exts[nr].encryption);
4396 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4397
4398 cur_pos += exts[nr].num_bytes;
4399 nr++;
4400
4401 if (cur_pos + offset >= last_byte)
4402 break;
4403
4404 if (no_fragment) {
4405 ret = 1;
4406 goto out;
4407 }
4408 path->slots[0]++;
4409 }
4410
4411 WARN_ON(cur_pos + offset > last_byte);
4412 if (cur_pos + offset < last_byte) {
4413 ret = -ENOENT;
4414 goto out;
4415 }
4416 ret = 0;
4417out:
4418 btrfs_free_path(path);
4419 if (ret) {
4420 if (exts != *extents)
4421 kfree(exts);
4422 } else {
4423 *extents = exts;
4424 *nr_extents = nr;
4425 }
4426 return ret;
4427}
4428
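/*
 * rewrite all file extent items in 'root' that still point at the
 * extent being relocated so they reference the new copy of the data.
 * The affected file range is locked and ordered extents are waited on
 * while the pointers are switched.
 */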
4429 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4430 struct btrfs_root *root,
4431 struct btrfs_path *path,
4432 struct btrfs_key *extent_key,
4433 struct btrfs_key *leaf_key,
4434 struct btrfs_ref_path *ref_path,
4435 struct disk_extent *new_extents,
4436 int nr_extents)
4437{
4438 struct extent_buffer *leaf;
4439 struct btrfs_file_extent_item *fi;
4440 struct inode *inode = NULL;
4441 struct btrfs_key key;
4442 u64 lock_start = 0;
4443 u64 lock_end = 0;
4444 u64 num_bytes;
4445 u64 ext_offset;
4446 u64 first_pos;
4447 u32 nritems;
4448 int nr_scanned = 0;
4449 int extent_locked = 0;
4450 int extent_type;
4451 int ret;
4452
4453 memcpy(&key, leaf_key, sizeof(key));
4454 first_pos = INT_LIMIT(loff_t) - extent_key->offset;
4455 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4456 if (key.objectid < ref_path->owner_objectid ||
4457 (key.objectid == ref_path->owner_objectid &&
4458 key.type < BTRFS_EXTENT_DATA_KEY)) {
4459 key.objectid = ref_path->owner_objectid;
4460 key.type = BTRFS_EXTENT_DATA_KEY;
4461 key.offset = 0;
4462 }
4463 }
4464
4465 while (1) {
4466 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4467 if (ret < 0)
4468 goto out;
4469
4470 leaf = path->nodes[0];
4471 nritems = btrfs_header_nritems(leaf);
4472next:
4473 if (extent_locked && ret > 0) {
4474 /*
4475 * the file extent item was modified by someone
4476 * before the extent got locked.
4477 */
4478 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4479 lock_end, GFP_NOFS);
4480 extent_locked = 0;
4481 }
4482
4483 if (path->slots[0] >= nritems) {
4484 if (++nr_scanned > 2)
4485 break;
4486
4487 BUG_ON(extent_locked);
4488 ret = btrfs_next_leaf(root, path);
4489 if (ret < 0)
4490 goto out;
4491 if (ret > 0)
4492 break;
4493 leaf = path->nodes[0];
4494 nritems = btrfs_header_nritems(leaf);
4495 }
4496
4497 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4498
4499 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4500 if ((key.objectid > ref_path->owner_objectid) ||
4501 (key.objectid == ref_path->owner_objectid &&
4502 key.type > BTRFS_EXTENT_DATA_KEY) ||
4503 (key.offset >= first_pos + extent_key->offset))
4504 break;
4505 }
4506
4507 if (inode && key.objectid != inode->i_ino) {
4508 BUG_ON(extent_locked);
4509 btrfs_release_path(root, path);
4510 mutex_unlock(&inode->i_mutex);
4511 iput(inode);
4512 inode = NULL;
4513 continue;
4514 }
4515
4516 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4517 path->slots[0]++;
4518 ret = 1;
4519 goto next;
4520 }
4521 fi = btrfs_item_ptr(leaf, path->slots[0],
4522 struct btrfs_file_extent_item);
4523 extent_type = btrfs_file_extent_type(leaf, fi);
4524 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4525 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4526 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4527 extent_key->objectid)) {
4528 path->slots[0]++;
4529 ret = 1;
4530 goto next;
4531 }
4532
4533 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4534 ext_offset = btrfs_file_extent_offset(leaf, fi);
4535
4536 if (first_pos > key.offset - ext_offset)
4537 first_pos = key.offset - ext_offset;
4538
4539 if (!extent_locked) {
4540 lock_start = key.offset;
4541 lock_end = lock_start + num_bytes - 1;
4542 } else {
4543 if (lock_start > key.offset ||
4544 lock_end + 1 < key.offset + num_bytes) {
4545 unlock_extent(&BTRFS_I(inode)->io_tree,
4546 lock_start, lock_end, GFP_NOFS);
4547 extent_locked = 0;
4548 }
4549 }
4550
4551 if (!inode) {
4552 btrfs_release_path(root, path);
4553
4554 inode = btrfs_iget_locked(root->fs_info->sb,
4555 key.objectid, root);
4556 if (inode->i_state & I_NEW) {
4557 BTRFS_I(inode)->root = root;
4558 BTRFS_I(inode)->location.objectid =
4559 key.objectid;
4560 BTRFS_I(inode)->location.type =
4561 BTRFS_INODE_ITEM_KEY;
4562 BTRFS_I(inode)->location.offset = 0;
4563 btrfs_read_locked_inode(inode);
4564 unlock_new_inode(inode);
4565 }
4566 /*
4567 * some code calls btrfs_commit_transaction while
4568 * holding the i_mutex, so we can't use mutex_lock
4569 * here.
4570 */
4571 if (is_bad_inode(inode) ||
4572 !mutex_trylock(&inode->i_mutex)) {
4573 iput(inode);
4574 inode = NULL;
4575 key.offset = (u64)-1;
4576 goto skip;
4577 }
4578 }
4579
4580 if (!extent_locked) {
4581 struct btrfs_ordered_extent *ordered;
4582
4583 btrfs_release_path(root, path);
4584
4585 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4586 lock_end, GFP_NOFS);
4587 ordered = btrfs_lookup_first_ordered_extent(inode,
4588 lock_end);
4589 if (ordered &&
4590 ordered->file_offset <= lock_end &&
4591 ordered->file_offset + ordered->len > lock_start) {
4592 unlock_extent(&BTRFS_I(inode)->io_tree,
4593 lock_start, lock_end, GFP_NOFS);
4594 btrfs_start_ordered_extent(inode, ordered, 1);
4595 btrfs_put_ordered_extent(ordered);
4596 key.offset += num_bytes;
4597 goto skip;
4598 }
4599 if (ordered)
4600 btrfs_put_ordered_extent(ordered);
4601
4602 extent_locked = 1;
4603 continue;
4604 }
4605
4606 if (nr_extents == 1) {
4607 /* update extent pointer in place */
4608 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4609 new_extents[0].disk_bytenr);
4610 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4611 new_extents[0].disk_num_bytes);
4612 btrfs_mark_buffer_dirty(leaf);
4613
4614 btrfs_drop_extent_cache(inode, key.offset,
4615 key.offset + num_bytes - 1, 0);
4616
4617 ret = btrfs_inc_extent_ref(trans, root,
4618 new_extents[0].disk_bytenr,
4619 new_extents[0].disk_num_bytes,
4620 leaf->start,
4621 root->root_key.objectid,
4622 trans->transid,
4623 key.objectid);
4624 BUG_ON(ret);
4625
4626 ret = btrfs_free_extent(trans, root,
4627 extent_key->objectid,
4628 extent_key->offset,
4629 leaf->start,
4630 btrfs_header_owner(leaf),
4631 btrfs_header_generation(leaf),
4632 key.objectid, 0);
4633 BUG_ON(ret);
4634
4635 btrfs_release_path(root, path);
4636 key.offset += num_bytes;
4637 } else {
4638 BUG_ON(1);
4639#if 0
4640 u64 alloc_hint;
4641 u64 extent_len;
4642 int i;
4643 /*
4644 * drop old extent pointer at first, then insert the
4645 * new pointers one bye one
4646 */
4647 btrfs_release_path(root, path);
4648 ret = btrfs_drop_extents(trans, root, inode, key.offset,
4649 key.offset + num_bytes,
4650 key.offset, &alloc_hint);
4651 BUG_ON(ret);
4652
4653 for (i = 0; i < nr_extents; i++) {
4654 if (ext_offset >= new_extents[i].num_bytes) {
4655 ext_offset -= new_extents[i].num_bytes;
4656 continue;
4657 }
4658 extent_len = min(new_extents[i].num_bytes -
4659 ext_offset, num_bytes);
4660
4661 ret = btrfs_insert_empty_item(trans, root,
4662 path, &key,
4663 sizeof(*fi));
4664 BUG_ON(ret);
4665
4666 leaf = path->nodes[0];
4667 fi = btrfs_item_ptr(leaf, path->slots[0],
4668 struct btrfs_file_extent_item);
4669 btrfs_set_file_extent_generation(leaf, fi,
4670 trans->transid);
4671 btrfs_set_file_extent_type(leaf, fi,
4672 BTRFS_FILE_EXTENT_REG);
4673 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4674 new_extents[i].disk_bytenr);
4675 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4676 new_extents[i].disk_num_bytes);
4677 btrfs_set_file_extent_ram_bytes(leaf, fi,
4678 new_extents[i].ram_bytes);
4679
4680 btrfs_set_file_extent_compression(leaf, fi,
4681 new_extents[i].compression);
4682 btrfs_set_file_extent_encryption(leaf, fi,
4683 new_extents[i].encryption);
4684 btrfs_set_file_extent_other_encoding(leaf, fi,
4685 new_extents[i].other_encoding);
4686
4687 btrfs_set_file_extent_num_bytes(leaf, fi,
4688 extent_len);
4689 ext_offset += new_extents[i].offset;
4690 btrfs_set_file_extent_offset(leaf, fi,
4691 ext_offset);
4692 btrfs_mark_buffer_dirty(leaf);
4693
4694 btrfs_drop_extent_cache(inode, key.offset,
4695 key.offset + extent_len - 1, 0);
4696
4697 ret = btrfs_inc_extent_ref(trans, root,
4698 new_extents[i].disk_bytenr,
4699 new_extents[i].disk_num_bytes,
4700 leaf->start,
4701 root->root_key.objectid,
4702 trans->transid, key.objectid);
4703 BUG_ON(ret);
4704 btrfs_release_path(root, path);
4705
4706 inode_add_bytes(inode, extent_len);
4707
4708 ext_offset = 0;
4709 num_bytes -= extent_len;
4710 key.offset += extent_len;
4711
4712 if (num_bytes == 0)
4713 break;
4714 }
4715 BUG_ON(i >= nr_extents);
4716#endif
4717 }
4718
4719 if (extent_locked) {
4720 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4721 lock_end, GFP_NOFS);
4722 extent_locked = 0;
4723 }
4724skip:
4725 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
4726 key.offset >= first_pos + extent_key->offset)
4727 break;
4728
4729 cond_resched();
4730 }
4731 ret = 0;
4732out:
4733 btrfs_release_path(root, path);
4734 if (inode) {
4735 mutex_unlock(&inode->i_mutex);
4736 if (extent_locked) {
4737 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4738 lock_end, GFP_NOFS);
4739 }
4740 iput(inode);
4741 }
4742 return ret;
4743}
4744
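/*
 * a leaf COWed through the reloc tree keeps the same file extents as
 * the original leaf, so duplicate the cached leaf ref of the original
 * block for the new one to keep the ref cache usable.
 */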
4745int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
4746 struct btrfs_root *root,
4747 struct extent_buffer *buf, u64 orig_start)
4748{
4749 int level;
4750 int ret;
4751
4752 BUG_ON(btrfs_header_generation(buf) != trans->transid);
4753 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
4754
4755 level = btrfs_header_level(buf);
4756 if (level == 0) {
4757 struct btrfs_leaf_ref *ref;
4758 struct btrfs_leaf_ref *orig_ref;
4759
4760 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
4761 if (!orig_ref)
4762 return -ENOENT;
4763
4764 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
4765 if (!ref) {
4766 btrfs_free_leaf_ref(root, orig_ref);
4767 return -ENOMEM;
4768 }
4769
4770 ref->nritems = orig_ref->nritems;
4771 memcpy(ref->extents, orig_ref->extents,
4772 sizeof(ref->extents[0]) * ref->nritems);
4773
4774 btrfs_free_leaf_ref(root, orig_ref);
4775
4776 ref->root_gen = trans->transid;
4777 ref->bytenr = buf->start;
4778 ref->owner = btrfs_header_owner(buf);
4779 ref->generation = btrfs_header_generation(buf);
4780 ret = btrfs_add_leaf_ref(root, ref, 0);
4781 WARN_ON(ret);
4782 btrfs_free_leaf_ref(root, ref);
4783 }
4784 return 0;
4785}
4786
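/*
 * drop the cached extent mappings in 'target_root' for every regular
 * file extent referenced by 'leaf', forcing readers to look up the
 * relocated copies from the tree.
 */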
4787 static noinline int invalidate_extent_cache(struct btrfs_root *root,
4788 struct extent_buffer *leaf,
4789 struct btrfs_block_group_cache *group,
4790 struct btrfs_root *target_root)
4791{
4792 struct btrfs_key key;
4793 struct inode *inode = NULL;
4794 struct btrfs_file_extent_item *fi;
4795 u64 num_bytes;
4796 u64 skip_objectid = 0;
4797 u32 nritems;
4798 u32 i;
4799
4800 nritems = btrfs_header_nritems(leaf);
4801 for (i = 0; i < nritems; i++) {
4802 btrfs_item_key_to_cpu(leaf, &key, i);
4803 if (key.objectid == skip_objectid ||
4804 key.type != BTRFS_EXTENT_DATA_KEY)
4805 continue;
4806 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4807 if (btrfs_file_extent_type(leaf, fi) ==
4808 BTRFS_FILE_EXTENT_INLINE)
4809 continue;
4810 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4811 continue;
4812 if (!inode || inode->i_ino != key.objectid) {
4813 iput(inode);
4814 inode = btrfs_ilookup(target_root->fs_info->sb,
4815 key.objectid, target_root, 1);
4816 }
4817 if (!inode) {
4818 skip_objectid = key.objectid;
4819 continue;
4820 }
4821 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4822
4823 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4824 key.offset + num_bytes - 1, GFP_NOFS);
4825 btrfs_drop_extent_cache(inode, key.offset,
4826 key.offset + num_bytes - 1, 1);
4827 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
4828 key.offset + num_bytes - 1, GFP_NOFS);
4829 cond_resched();
4830 }
4831 iput(inode);
4832 return 0;
4833}
4834
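/*
 * update every file extent item in 'leaf' that sits inside the block
 * group being relocated to point at the new copy of its data, fixing
 * up the cached leaf ref and the extent back references as well.
 */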
4835 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
4836 struct btrfs_root *root,
4837 struct extent_buffer *leaf,
4838 struct btrfs_block_group_cache *group,
4839 struct inode *reloc_inode)
4840{
4841 struct btrfs_key key;
4842 struct btrfs_key extent_key;
4843 struct btrfs_file_extent_item *fi;
4844 struct btrfs_leaf_ref *ref;
4845 struct disk_extent *new_extent;
4846 u64 bytenr;
4847 u64 num_bytes;
4848 u32 nritems;
4849 u32 i;
4850 int ext_index;
4851 int nr_extent;
4852 int ret;
4853
4854 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
4855 BUG_ON(!new_extent);
4856
4857 ref = btrfs_lookup_leaf_ref(root, leaf->start);
4858 BUG_ON(!ref);
4859
4860 ext_index = -1;
4861 nritems = btrfs_header_nritems(leaf);
4862 for (i = 0; i < nritems; i++) {
4863 btrfs_item_key_to_cpu(leaf, &key, i);
4864 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4865 continue;
4866 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4867 if (btrfs_file_extent_type(leaf, fi) ==
4868 BTRFS_FILE_EXTENT_INLINE)
4869 continue;
4870 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4871 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4872 if (bytenr == 0)
4873 continue;
4874
4875 ext_index++;
4876 if (bytenr >= group->key.objectid + group->key.offset ||
4877 bytenr + num_bytes <= group->key.objectid)
4878 continue;
4879
4880 extent_key.objectid = bytenr;
4881 extent_key.offset = num_bytes;
4882 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
4883 nr_extent = 1;
4884 ret = get_new_locations(reloc_inode, &extent_key,
4885 group->key.objectid, 1,
4886 &new_extent, &nr_extent);
4887 if (ret > 0)
4888 continue;
4889 BUG_ON(ret < 0);
4890
4891 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
4892 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
4893 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
4894 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
4895
4896 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4897 new_extent->disk_bytenr);
4898 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4899 new_extent->disk_num_bytes);
4900 btrfs_mark_buffer_dirty(leaf);
4901
4902 ret = btrfs_inc_extent_ref(trans, root,
4903 new_extent->disk_bytenr,
4904 new_extent->disk_num_bytes,
4905 leaf->start,
4906 root->root_key.objectid,
4907 trans->transid, key.objectid);
4908 BUG_ON(ret);
4909 ret = btrfs_free_extent(trans, root,
4910 bytenr, num_bytes, leaf->start,
4911 btrfs_header_owner(leaf),
4912 btrfs_header_generation(leaf),
4913 key.objectid, 0);
4914 BUG_ON(ret);
4915 cond_resched();
4916 }
4917 kfree(new_extent);
4918 BUG_ON(ext_index + 1 != ref->nritems);
4919 btrfs_free_leaf_ref(root, ref);
4920 return 0;
4921}
4922
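/*
 * detach the reloc tree from 'root' and put it on the dead reloc root
 * list.  The reloc tree's root item is updated so the tree can later
 * be dropped by btrfs_drop_dead_reloc_roots.
 */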
4923int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
4924 struct btrfs_root *root)
4925{
4926 struct btrfs_root *reloc_root;
4927 int ret;
4928
4929 if (root->reloc_root) {
4930 reloc_root = root->reloc_root;
4931 root->reloc_root = NULL;
4932 list_add(&reloc_root->dead_list,
4933 &root->fs_info->dead_reloc_roots);
4934
4935 btrfs_set_root_bytenr(&reloc_root->root_item,
4936 reloc_root->node->start);
4937 btrfs_set_root_level(&root->root_item,
4938 btrfs_header_level(reloc_root->node));
4939 memset(&reloc_root->root_item.drop_progress, 0,
4940 sizeof(struct btrfs_disk_key));
4941 reloc_root->root_item.drop_level = 0;
4942
4943 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4944 &reloc_root->root_key,
4945 &reloc_root->root_item);
4946 BUG_ON(ret);
4947 }
4948 return 0;
4949}
4950
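/*
 * drop every reloc tree on the dead list.  btrfs_drop_snapshot is
 * restarted in a fresh transaction whenever a commit is pending, so
 * dropping a large tree doesn't stall transaction commits.
 */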
4951int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
4952{
4953 struct btrfs_trans_handle *trans;
4954 struct btrfs_root *reloc_root;
4955 struct btrfs_root *prev_root = NULL;
4956 struct list_head dead_roots;
4957 int ret;
4958 unsigned long nr;
4959
4960 INIT_LIST_HEAD(&dead_roots);
4961 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
4962
4963 while (!list_empty(&dead_roots)) {
4964 reloc_root = list_entry(dead_roots.prev,
4965 struct btrfs_root, dead_list);
4966 list_del_init(&reloc_root->dead_list);
4967
4968 BUG_ON(reloc_root->commit_root != NULL);
4969 while (1) {
4970 trans = btrfs_join_transaction(root, 1);
4971 BUG_ON(!trans);
4972
4973 mutex_lock(&root->fs_info->drop_mutex);
4974 ret = btrfs_drop_snapshot(trans, reloc_root);
4975 if (ret != -EAGAIN)
4976 break;
4977 mutex_unlock(&root->fs_info->drop_mutex);
4978
4979 nr = trans->blocks_used;
4980 ret = btrfs_end_transaction(trans, root);
4981 BUG_ON(ret);
4982 btrfs_btree_balance_dirty(root, nr);
4983 }
4984
4985 free_extent_buffer(reloc_root->node);
4986
4987 ret = btrfs_del_root(trans, root->fs_info->tree_root,
4988 &reloc_root->root_key);
4989 BUG_ON(ret);
4990 mutex_unlock(&root->fs_info->drop_mutex);
4991
4992 nr = trans->blocks_used;
4993 ret = btrfs_end_transaction(trans, root);
4994 BUG_ON(ret);
4995 btrfs_btree_balance_dirty(root, nr);
4996
4997 kfree(prev_root);
4998 prev_root = reloc_root;
4999 }
5000 if (prev_root) {
5001 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
5002 kfree(prev_root);
5003 }
5004 return 0;
5005}
5006
5007int btrfs_add_dead_reloc_root(struct btrfs_root *root)
5008{
5009 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
5010 return 0;
5011}
5012
5013int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5014{
5015 struct btrfs_root *reloc_root;
5016 struct btrfs_trans_handle *trans;
5017 struct btrfs_key location;
5018 int found;
5019 int ret;
5020
5021 mutex_lock(&root->fs_info->tree_reloc_mutex);
5022 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
5023 BUG_ON(ret);
5024 found = !list_empty(&root->fs_info->dead_reloc_roots);
5025 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5026
5027 if (found) {
5028 trans = btrfs_start_transaction(root, 1);
5029 BUG_ON(!trans);
5030 ret = btrfs_commit_transaction(trans, root);
5031 BUG_ON(ret);
5032 }
5033
5034 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5035 location.offset = (u64)-1;
5036 location.type = BTRFS_ROOT_ITEM_KEY;
5037
5038 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5039 BUG_ON(!reloc_root);
5040 btrfs_orphan_cleanup(reloc_root);
5041 return 0;
5042}
5043
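/*
 * create the reloc tree for 'root' if it doesn't already exist.  The
 * reloc tree starts out as a copy of the last committed root and is
 * inserted with BTRFS_TREE_RELOC_OBJECTID as the key objectid and the
 * subvol's objectid as the key offset.
 */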
5044 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5045 struct btrfs_root *root)
5046{
5047 struct btrfs_root *reloc_root;
5048 struct extent_buffer *eb;
5049 struct btrfs_root_item *root_item;
5050 struct btrfs_key root_key;
5051 int ret;
5052
5053 BUG_ON(!root->ref_cows);
5054 if (root->reloc_root)
5055 return 0;
5056
5057 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5058 BUG_ON(!root_item);
5059
5060 ret = btrfs_copy_root(trans, root, root->commit_root,
5061 &eb, BTRFS_TREE_RELOC_OBJECTID);
5062 BUG_ON(ret);
5063
5064 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5065 root_key.offset = root->root_key.objectid;
5066 root_key.type = BTRFS_ROOT_ITEM_KEY;
5067
5068 memcpy(root_item, &root->root_item, sizeof(*root_item));
5069 btrfs_set_root_refs(root_item, 0);
5070 btrfs_set_root_bytenr(root_item, eb->start);
5071 btrfs_set_root_level(root_item, btrfs_header_level(eb));
5072 btrfs_set_root_generation(root_item, trans->transid);
5073
5074 btrfs_tree_unlock(eb);
5075 free_extent_buffer(eb);
5076
5077 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5078 &root_key, root_item);
5079 BUG_ON(ret);
5080 kfree(root_item);
5081
5082 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5083 &root_key);
5084 BUG_ON(!reloc_root);
5085 reloc_root->last_trans = trans->transid;
5086 reloc_root->commit_root = NULL;
5087 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5088
5089 root->reloc_root = reloc_root;
5090 return 0;
5091}
5092
5093/*
5094 * Core function of space balance.
5095 *
5096 * The idea is to use reloc trees to relocate tree blocks in reference
5097 * counted roots. There is one reloc tree for each subvol, and all
5098 * reloc trees share the same root key objectid. Reloc trees are
5099 * snapshots of the latest committed roots of subvols (root->commit_root).
5100 *
5101 * To relocate a tree block referenced by a subvol, there are two steps:
5102 * COW the block through the subvol's reloc tree, then update the block
5103 * pointer in the subvol to point to the new block. Since all reloc trees
5104 * share the same root key objectid, special handling of tree blocks
5105 * owned by them is easy. Once a tree block has been COWed in one reloc
5106 * tree, we can use the resulting new block directly when the same block
5107 * must be COWed again through other reloc trees. In this way, relocated
5108 * tree blocks are shared between reloc trees, so they are also shared
5109 * between subvols.
5110 */
5111 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5112 struct btrfs_root *root,
5113 struct btrfs_path *path,
5114 struct btrfs_key *first_key,
5115 struct btrfs_ref_path *ref_path,
5116 struct btrfs_block_group_cache *group,
5117 struct inode *reloc_inode)
5118{
5119 struct btrfs_root *reloc_root;
5120 struct extent_buffer *eb = NULL;
5121 struct btrfs_key *keys;
5122 u64 *nodes;
5123 int level;
5124 int shared_level;
5125 int lowest_level = 0;
5126 int ret;
5127
5128 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5129 lowest_level = ref_path->owner_objectid;
5130
5131 if (!root->ref_cows) {
5132 path->lowest_level = lowest_level;
5133 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5134 BUG_ON(ret < 0);
5135 path->lowest_level = 0;
5136 btrfs_release_path(root, path);
5137 return 0;
5138 }
5139
5140 mutex_lock(&root->fs_info->tree_reloc_mutex);
5141 ret = init_reloc_tree(trans, root);
5142 BUG_ON(ret);
5143 reloc_root = root->reloc_root;
5144
5145 shared_level = ref_path->shared_level;
5146 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5147
5148 keys = ref_path->node_keys;
5149 nodes = ref_path->new_nodes;
5150 memset(&keys[shared_level + 1], 0,
5151 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5152 memset(&nodes[shared_level + 1], 0,
5153 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5154
5155 if (nodes[lowest_level] == 0) {
5156 path->lowest_level = lowest_level;
5157 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5158 0, 1);
5159 BUG_ON(ret);
5160 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5161 eb = path->nodes[level];
5162 if (!eb || eb == reloc_root->node)
5163 break;
5164 nodes[level] = eb->start;
5165 if (level == 0)
5166 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5167 else
5168 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5169 }
5170 if (nodes[0] &&
5171 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5172 eb = path->nodes[0];
5173 ret = replace_extents_in_leaf(trans, reloc_root, eb,
5174 group, reloc_inode);
5175 BUG_ON(ret);
5176 }
5177 btrfs_release_path(reloc_root, path);
5178 } else {
5179 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5180 lowest_level);
5181 BUG_ON(ret);
5182 }
5183
5184 /*
5185 * replace tree blocks in the fs tree with tree blocks in
5186 * the reloc tree.
5187 */
5188 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5189 BUG_ON(ret < 0);
5190
5191 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5192 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5193 0, 0);
5194 BUG_ON(ret);
5195 extent_buffer_get(path->nodes[0]);
5196 eb = path->nodes[0];
5197 btrfs_release_path(reloc_root, path);
5198 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5199 BUG_ON(ret);
5200 free_extent_buffer(eb);
5201 }
5202
5203 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5204 path->lowest_level = 0;
5205 return 0;
5206}
5207
5208 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5209 struct btrfs_root *root,
5210 struct btrfs_path *path,
5211 struct btrfs_key *first_key,
5212 struct btrfs_ref_path *ref_path)
5213{
5214 int ret;
5215
5216 ret = relocate_one_path(trans, root, path, first_key,
5217 ref_path, NULL, NULL);
5218 BUG_ON(ret);
5219
5220 if (root == root->fs_info->extent_root)
5221 btrfs_extent_post_op(trans, root);
5222
5223 return 0;
5224}
5225
5226 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5227 struct btrfs_root *extent_root,
5228 struct btrfs_path *path,
5229 struct btrfs_key *extent_key)
5230{
5231 int ret;
5232
5233 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5234 if (ret)
5235 goto out;
5236 ret = btrfs_del_item(trans, extent_root, path);
5237out:
5238 btrfs_release_path(extent_root, path);
5239 return ret;
5240}
5241
5242 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5243 struct btrfs_ref_path *ref_path)
5244{
5245 struct btrfs_key root_key;
5246
5247 root_key.objectid = ref_path->root_objectid;
5248 root_key.type = BTRFS_ROOT_ITEM_KEY;
5249 if (is_cowonly_root(ref_path->root_objectid))
5250 root_key.offset = 0;
5251 else
5252 root_key.offset = (u64)-1;
5253
5254 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5255}
5256
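/*
 * relocate all references to the extent described by 'extent_key'.
 * Each chain of back references is walked up to its tree root: data
 * extents are copied through the relocation inode on pass 0, tree
 * blocks are relocated via the per-subvol reloc trees.
 */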
5257 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5258 struct btrfs_path *path,
5259 struct btrfs_key *extent_key,
5260 struct btrfs_block_group_cache *group,
5261 struct inode *reloc_inode, int pass)
5262{
5263 struct btrfs_trans_handle *trans;
5264 struct btrfs_root *found_root;
5265 struct btrfs_ref_path *ref_path = NULL;
5266 struct disk_extent *new_extents = NULL;
5267 int nr_extents = 0;
5268 int loops;
5269 int ret;
5270 int level;
5271 struct btrfs_key first_key;
5272 u64 prev_block = 0;
5273
5275 trans = btrfs_start_transaction(extent_root, 1);
5276 BUG_ON(!trans);
5277
5278 if (extent_key->objectid == 0) {
5279 ret = del_extent_zero(trans, extent_root, path, extent_key);
5280 goto out;
5281 }
5282
5283 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5284 if (!ref_path) {
5285 ret = -ENOMEM;
5286 goto out;
5287 }
5288
5289 for (loops = 0; ; loops++) {
5290 if (loops == 0) {
5291 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5292 extent_key->objectid);
5293 } else {
5294 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5295 }
5296 if (ret < 0)
5297 goto out;
5298 if (ret > 0)
5299 break;
5300
5301 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5302 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5303 continue;
5304
5305 found_root = read_ref_root(extent_root->fs_info, ref_path);
5306 BUG_ON(!found_root);
5307 /*
5308 * for reference counted trees, only process reference paths
5309 * rooted at the latest committed root.
5310 */
5311 if (found_root->ref_cows &&
5312 ref_path->root_generation != found_root->root_key.offset)
5313 continue;
5314
5315 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5316 if (pass == 0) {
5317 /*
5318 * copy data extents to new locations
5319 */
5320 u64 group_start = group->key.objectid;
5321 ret = relocate_data_extent(reloc_inode,
5322 extent_key,
5323 group_start);
5324 if (ret < 0)
5325 goto out;
5326 break;
5327 }
5328 level = 0;
5329 } else {
5330 level = ref_path->owner_objectid;
5331 }
5332
5333 if (prev_block != ref_path->nodes[level]) {
5334 struct extent_buffer *eb;
5335 u64 block_start = ref_path->nodes[level];
5336 u64 block_size = btrfs_level_size(found_root, level);
5337
5338 eb = read_tree_block(found_root, block_start,
5339 block_size, 0);
5340 btrfs_tree_lock(eb);
5341 BUG_ON(level != btrfs_header_level(eb));
5342
5343 if (level == 0)
5344 btrfs_item_key_to_cpu(eb, &first_key, 0);
5345 else
5346 btrfs_node_key_to_cpu(eb, &first_key, 0);
5347
5348 btrfs_tree_unlock(eb);
5349 free_extent_buffer(eb);
5350 prev_block = block_start;
5351 }
5352
5353 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
5354 pass >= 2) {
5355 /*
5356 * use fallback method to process the remaining
5357 * references.
5358 */
5359 if (!new_extents) {
5360 u64 group_start = group->key.objectid;
5361 new_extents = kmalloc(sizeof(*new_extents),
5362 GFP_NOFS);
5363 nr_extents = 1;
5364 ret = get_new_locations(reloc_inode,
5365 extent_key,
5366 group_start, 1,
5367 &new_extents,
5368 &nr_extents);
5369 if (ret)
5370 goto out;
5371 }
5372 btrfs_record_root_in_trans(found_root);
5373 ret = replace_one_extent(trans, found_root,
5374 path, extent_key,
5375 &first_key, ref_path,
5376 new_extents, nr_extents);
5377 if (ret < 0)
5378 goto out;
5379 continue;
5380 }
5381
5382 btrfs_record_root_in_trans(found_root);
5383 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5384 ret = relocate_tree_block(trans, found_root, path,
5385 &first_key, ref_path);
5386 } else {
5387 /*
5388 * try to update data extent references while
5389 * keeping metadata shared between snapshots.
5390 */
5391 ret = relocate_one_path(trans, found_root, path,
5392 &first_key, ref_path,
5393 group, reloc_inode);
5394 }
5395 if (ret < 0)
5396 goto out;
5397 }
5398 ret = 0;
5399out:
5400 btrfs_end_transaction(trans, extent_root);
5401 kfree(new_extents);
5402 kfree(ref_path);
5403 return ret;
5404}
5405
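/*
 * figure out the raid profile for the chunks that replace a relocated
 * block group, based on how many writeable devices are available.
 */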
5406static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5407{
5408 u64 num_devices;
5409 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5410 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5411
5412 num_devices = root->fs_info->fs_devices->rw_devices;
5413 if (num_devices == 1) {
5414 stripped |= BTRFS_BLOCK_GROUP_DUP;
5415 stripped = flags & ~stripped;
5416
5417 /* turn raid0 into single device chunks */
5418 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5419 return stripped;
5420
5421 /* turn mirroring into duplication */
5422 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5423 BTRFS_BLOCK_GROUP_RAID10))
5424 return stripped | BTRFS_BLOCK_GROUP_DUP;
5425 return flags;
5426 } else {
5427 /* they already had raid on here, just return */
5428 if (flags & stripped)
5429 return flags;
5430
5431 stripped |= BTRFS_BLOCK_GROUP_DUP;
5432 stripped = flags & ~stripped;
5433
5434 /* switch duplicated blocks with raid1 */
5435 if (flags & BTRFS_BLOCK_GROUP_DUP)
5436 return stripped | BTRFS_BLOCK_GROUP_RAID1;
5437
5438 /* turn single device chunks into raid0 */
5439 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5440 }
5441 return flags;
5442}
5443
5444static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5445 struct btrfs_block_group_cache *shrink_block_group,
5446 int force)
5447{
5448 struct btrfs_trans_handle *trans;
5449 u64 new_alloc_flags;
5450 u64 calc;
5451
5452 spin_lock(&shrink_block_group->lock);
5453 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5454 spin_unlock(&shrink_block_group->lock);
5455
5456 trans = btrfs_start_transaction(root, 1);
5457 spin_lock(&shrink_block_group->lock);
5458
5459 new_alloc_flags = update_block_group_flags(root,
5460 shrink_block_group->flags);
5461 if (new_alloc_flags != shrink_block_group->flags) {
5462 calc =
5463 btrfs_block_group_used(&shrink_block_group->item);
5464 } else {
5465 calc = shrink_block_group->key.offset;
5466 }
5467 spin_unlock(&shrink_block_group->lock);
5468
5469 do_chunk_alloc(trans, root->fs_info->extent_root,
5470 calc + 2 * 1024 * 1024, new_alloc_flags, force);
5471
5472 btrfs_end_transaction(trans, root);
5473 } else {
5474 spin_unlock(&shrink_block_group->lock);
}
5475 return 0;
5476}
5477
5478static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5479 struct btrfs_root *root,
5480 u64 objectid, u64 size)
5481{
5482 struct btrfs_path *path;
5483 struct btrfs_inode_item *item;
5484 struct extent_buffer *leaf;
5485 int ret;
5486
5487 path = btrfs_alloc_path();
5488 if (!path)
5489 return -ENOMEM;
5490
5491 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5492 if (ret)
5493 goto out;
5494
5495 leaf = path->nodes[0];
5496 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5497 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5498 btrfs_set_inode_generation(leaf, item, 1);
5499 btrfs_set_inode_size(leaf, item, size);
5500 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5501 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5502 btrfs_mark_buffer_dirty(leaf);
5503 btrfs_release_path(root, path);
5504out:
5505 btrfs_free_path(path);
5506 return ret;
5507}
5508
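/*
 * create an orphan inode in the data reloc tree that covers the whole
 * block group.  Data extents are copied into this inode during
 * relocation; the orphan item makes sure the inode is deleted if we
 * crash before the relocation finishes.
 */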
5509 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5510 struct btrfs_block_group_cache *group)
5511{
5512 struct inode *inode = NULL;
5513 struct btrfs_trans_handle *trans;
5514 struct btrfs_root *root;
5515 struct btrfs_key root_key;
5516 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5517 int err = 0;
5518
5519 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5520 root_key.type = BTRFS_ROOT_ITEM_KEY;
5521 root_key.offset = (u64)-1;
5522 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5523 if (IS_ERR(root))
5524 return ERR_CAST(root);
5525
5526 trans = btrfs_start_transaction(root, 1);
5527 BUG_ON(!trans);
5528
5529 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5530 if (err)
5531 goto out;
5532
5533 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5534 BUG_ON(err);
5535
5536 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5537 group->key.offset, 0, group->key.offset,
5538 0, 0, 0);
5539 BUG_ON(err);
5540
5541 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
5542 if (inode->i_state & I_NEW) {
5543 BTRFS_I(inode)->root = root;
5544 BTRFS_I(inode)->location.objectid = objectid;
5545 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5546 BTRFS_I(inode)->location.offset = 0;
5547 btrfs_read_locked_inode(inode);
5548 unlock_new_inode(inode);
5549 BUG_ON(is_bad_inode(inode));
5550 } else {
5551 BUG_ON(1);
5552 }
5553
5554 err = btrfs_orphan_add(trans, inode);
5555out:
5556 btrfs_end_transaction(trans, root);
5557 if (err) {
5558 if (inode)
5559 iput(inode);
5560 inode = ERR_PTR(err);
5561 }
5562 return inode;
5563}
5564
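/*
 * move every allocated extent out of the block group starting at
 * 'group_start'.  The group is marked read only, then extents are
 * relocated in multiple passes until the group is empty.
 */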
5565int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5566{
5567 struct btrfs_trans_handle *trans;
5568 struct btrfs_path *path;
5569 struct btrfs_fs_info *info = root->fs_info;
5570 struct extent_buffer *leaf;
5571 struct inode *reloc_inode;
5572 struct btrfs_block_group_cache *block_group;
5573 struct btrfs_key key;
5574 u64 skipped;
5575 u64 cur_byte;
5576 u64 total_found;
5577 u32 nritems;
5578 int ret;
5579 int progress;
5580 int pass = 0;
5581
5582 root = root->fs_info->extent_root;
5583
5584 block_group = btrfs_lookup_block_group(info, group_start);
5585 BUG_ON(!block_group);
5586
5587 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5588 (unsigned long long)block_group->key.objectid,
5589 (unsigned long long)block_group->flags);
5590
5591 path = btrfs_alloc_path();
5592 BUG_ON(!path);
5593
5594 reloc_inode = create_reloc_inode(info, block_group);
5595 BUG_ON(IS_ERR(reloc_inode));
5596
5597 __alloc_chunk_for_shrink(root, block_group, 1);
5598 set_block_group_readonly(block_group);
5599
5600 btrfs_start_delalloc_inodes(info->tree_root);
5601 btrfs_wait_ordered_extents(info->tree_root, 0);
5602again:
5603 skipped = 0;
5604 total_found = 0;
5605 progress = 0;
5606 key.objectid = block_group->key.objectid;
5607 key.offset = 0;
5608 key.type = 0;
5609 cur_byte = key.objectid;
5610
5611 trans = btrfs_start_transaction(info->tree_root, 1);
5612 btrfs_commit_transaction(trans, info->tree_root);
5613
5614 mutex_lock(&root->fs_info->cleaner_mutex);
5615 btrfs_clean_old_snapshots(info->tree_root);
5616 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5617 mutex_unlock(&root->fs_info->cleaner_mutex);
5618
5619 while (1) {
5620 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5621 if (ret < 0)
5622 goto out;
5623next:
5624 leaf = path->nodes[0];
5625 nritems = btrfs_header_nritems(leaf);
5626 if (path->slots[0] >= nritems) {
5627 ret = btrfs_next_leaf(root, path);
5628 if (ret < 0)
5629 goto out;
5630 if (ret == 1) {
5631 ret = 0;
5632 break;
5633 }
5634 leaf = path->nodes[0];
5635 nritems = btrfs_header_nritems(leaf);
5636 }
5637
5638 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5639
5640 if (key.objectid >= block_group->key.objectid +
5641 block_group->key.offset)
5642 break;
5643
5644 if (progress && need_resched()) {
5645 btrfs_release_path(root, path);
5646 cond_resched();
5647 progress = 0;
5648 continue;
5649 }
5650 progress = 1;
5651
5652 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
5653 key.objectid + key.offset <= cur_byte) {
5654 path->slots[0]++;
5655 goto next;
5656 }
5657
5658 total_found++;
5659 cur_byte = key.objectid + key.offset;
5660 btrfs_release_path(root, path);
5661
5662 __alloc_chunk_for_shrink(root, block_group, 0);
5663 ret = relocate_one_extent(root, path, &key, block_group,
5664 reloc_inode, pass);
5665 BUG_ON(ret < 0);
5666 if (ret > 0)
5667 skipped++;
5668
5669 key.objectid = cur_byte;
5670 key.type = 0;
5671 key.offset = 0;
5672 }
5673
5674 btrfs_release_path(root, path);
5675
5676 if (pass == 0) {
5677 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
5678 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
5679 WARN_ON(reloc_inode->i_mapping->nrpages);
5680 }
5681
5682 if (total_found > 0) {
5683 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
5684 (unsigned long long)total_found, pass);
5685 pass++;
5686 if (total_found == skipped && pass > 2) {
5687 iput(reloc_inode);
5688 reloc_inode = create_reloc_inode(info, block_group);
5689 pass = 0;
5690 }
5691 goto again;
5692 }
5693
5694 /* delete reloc_inode */
5695 iput(reloc_inode);
5696
5697 /* unpin extents in this range */
5698 trans = btrfs_start_transaction(info->tree_root, 1);
5699 btrfs_commit_transaction(trans, info->tree_root);
5700
5701 spin_lock(&block_group->lock);
5702 WARN_ON(block_group->pinned > 0);
5703 WARN_ON(block_group->reserved > 0);
5704 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
5705 spin_unlock(&block_group->lock);
5706 ret = 0;
5707out:
5708 btrfs_free_path(path);
5709 return ret;
5710}
5711
5712static int find_first_block_group(struct btrfs_root *root,
5713 struct btrfs_path *path, struct btrfs_key *key)
5714{
5715 int ret = 0;
5716 struct btrfs_key found_key;
5717 struct extent_buffer *leaf;
5718 int slot;
5719
5720 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
5721 if (ret < 0)
5722 goto out;
5723
5724 while (1) {
5725 slot = path->slots[0];
5726 leaf = path->nodes[0];
5727 if (slot >= btrfs_header_nritems(leaf)) {
5728 ret = btrfs_next_leaf(root, path);
5729 if (ret == 0)
5730 continue;
5731 if (ret < 0)
5732 goto out;
5733 break;
5734 }
5735 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5736
5737 if (found_key.objectid >= key->objectid &&
5738 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
5739 ret = 0;
5740 goto out;
5741 }
5742 path->slots[0]++;
5743 }
5744 ret = -ENOENT;
5745out:
5746 return ret;
5747}
5748
5749int btrfs_free_block_groups(struct btrfs_fs_info *info)
5750{
5751 struct btrfs_block_group_cache *block_group;
5752 struct rb_node *n;
5753
5754 spin_lock(&info->block_group_cache_lock);
5755 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
5756 block_group = rb_entry(n, struct btrfs_block_group_cache,
5757 cache_node);
5758 rb_erase(&block_group->cache_node,
5759 &info->block_group_cache_tree);
5760 spin_unlock(&info->block_group_cache_lock);
5761
5762 btrfs_remove_free_space_cache(block_group);
5763 down_write(&block_group->space_info->groups_sem);
5764 list_del(&block_group->list);
5765 up_write(&block_group->space_info->groups_sem);
5766 kfree(block_group);
5767
5768 spin_lock(&info->block_group_cache_lock);
5769 }
5770 spin_unlock(&info->block_group_cache_lock);
5771 return 0;
5772}
5773
5774int btrfs_read_block_groups(struct btrfs_root *root)
5775{
5776 struct btrfs_path *path;
5777 int ret;
5778 struct btrfs_block_group_cache *cache;
5779 struct btrfs_fs_info *info = root->fs_info;
5780 struct btrfs_space_info *space_info;
5781 struct btrfs_key key;
5782 struct btrfs_key found_key;
5783 struct extent_buffer *leaf;
5784
5785 root = info->extent_root;
5786 key.objectid = 0;
5787 key.offset = 0;
5788 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5789 path = btrfs_alloc_path();
5790 if (!path)
5791 return -ENOMEM;
5792
5793 while (1) {
5794 ret = find_first_block_group(root, path, &key);
5795 if (ret > 0) {
5796 ret = 0;
5797 goto error;
5798 }
5799 if (ret != 0)
5800 goto error;
5801
5802 leaf = path->nodes[0];
5803 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5804 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5805 if (!cache) {
5806 ret = -ENOMEM;
5807 break;
5808 }
5809
5810 spin_lock_init(&cache->lock);
5811 mutex_init(&cache->alloc_mutex);
5812 mutex_init(&cache->cache_mutex);
5813 INIT_LIST_HEAD(&cache->list);
5814 read_extent_buffer(leaf, &cache->item,
5815 btrfs_item_ptr_offset(leaf, path->slots[0]),
5816 sizeof(cache->item));
5817 memcpy(&cache->key, &found_key, sizeof(found_key));
5818
5819 key.objectid = found_key.objectid + found_key.offset;
5820 btrfs_release_path(root, path);
5821 cache->flags = btrfs_block_group_flags(&cache->item);
5822
5823 ret = update_space_info(info, cache->flags, found_key.offset,
5824 btrfs_block_group_used(&cache->item),
5825 &space_info);
5826 BUG_ON(ret);
5827 cache->space_info = space_info;
5828 down_write(&space_info->groups_sem);
5829 list_add_tail(&cache->list, &space_info->block_groups);
5830 up_write(&space_info->groups_sem);
5831
5832 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5833 BUG_ON(ret);
5834
5835 set_avail_alloc_bits(root->fs_info, cache->flags);
5836 if (btrfs_chunk_readonly(root, cache->key.objectid))
5837 set_block_group_readonly(cache);
5838 }
5839 ret = 0;
5840error:
5841 btrfs_free_path(path);
5842 return ret;
5843}
5844
5845int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5846 struct btrfs_root *root, u64 bytes_used,
5847 u64 type, u64 chunk_objectid, u64 chunk_offset,
5848 u64 size)
5849{
5850 int ret;
5851 struct btrfs_root *extent_root;
5852 struct btrfs_block_group_cache *cache;
5853
5854 extent_root = root->fs_info->extent_root;
5855
5856 root->fs_info->last_trans_new_blockgroup = trans->transid;
5857
5858 cache = kzalloc(sizeof(*cache), GFP_NOFS);
5859 if (!cache)
5860 return -ENOMEM;
5861
5862 cache->key.objectid = chunk_offset;
5863 cache->key.offset = size;
5864 spin_lock_init(&cache->lock);
5865 mutex_init(&cache->alloc_mutex);
5866 mutex_init(&cache->cache_mutex);
5867 INIT_LIST_HEAD(&cache->list);
5868 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
5869
5870 btrfs_set_block_group_used(&cache->item, bytes_used);
5871 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
5872 cache->flags = type;
5873 btrfs_set_block_group_flags(&cache->item, type);
5874
5875 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
5876 &cache->space_info);
5877 BUG_ON(ret);
5878 down_write(&cache->space_info->groups_sem);
5879 list_add_tail(&cache->list, &cache->space_info->block_groups);
5880 up_write(&cache->space_info->groups_sem);
5881
5882 ret = btrfs_add_block_group_cache(root->fs_info, cache);
5883 BUG_ON(ret);
5884
5885 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
5886 sizeof(cache->item));
5887 BUG_ON(ret);
5888
5889 finish_current_insert(trans, extent_root, 0);
5890 ret = del_pending_extents(trans, extent_root, 0);
5891 BUG_ON(ret);
5892 set_avail_alloc_bits(extent_root->fs_info, type);
5893
5894 return 0;
5895}
5896
5897int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
5898 struct btrfs_root *root, u64 group_start)
5899{
5900 struct btrfs_path *path;
5901 struct btrfs_block_group_cache *block_group;
5902 struct btrfs_key key;
5903 int ret;
5904
5905 root = root->fs_info->extent_root;
5906
5907 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
5908 BUG_ON(!block_group);
5909 BUG_ON(!block_group->ro);
5910
5911 memcpy(&key, &block_group->key, sizeof(key));
5912
5913 path = btrfs_alloc_path();
5914 BUG_ON(!path);
5915
5916 btrfs_remove_free_space_cache(block_group);
5917 rb_erase(&block_group->cache_node,
5918 &root->fs_info->block_group_cache_tree);
5919 down_write(&block_group->space_info->groups_sem);
5920 list_del(&block_group->list);
5921 up_write(&block_group->space_info->groups_sem);
5922
5923 spin_lock(&block_group->space_info->lock);
5924 block_group->space_info->total_bytes -= block_group->key.offset;
5925 block_group->space_info->bytes_readonly -= block_group->key.offset;
5926 spin_unlock(&block_group->space_info->lock);
5927 block_group->space_info->full = 0;
5928
5929 /*
5930 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
5931 kfree(shrink_block_group);
5932 */
5933
5934 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
5935 if (ret > 0)
5936 ret = -EIO;
5937 if (ret < 0)
5938 goto out;
5939
5940 ret = btrfs_del_item(trans, root, path);
5941out:
5942 btrfs_free_path(path);
5943 return ret;
5944}