/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
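/*
 * Each bitmap entry spans one page worth of bits: with 4K pages that is
 * 32768 bits, so a single bitmap tracks 32768 * sectorsize bytes of the
 * block group (128MB with 4K sectors).  MAX_CACHE_BYTES_PER_GIG caps the
 * in-memory footprint of free space tracking at 32K per GiB of block
 * group space; recalculate_thresholds() below enforces that cap.
 */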
static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}
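/*
 * Wrapper around __lookup_free_space_inode() that caches the resulting
 * inode in the block group.  The cached reference is taken and checked
 * under block_group->lock, so repeated lookups over the life of the
 * block group are cheap.
 */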
struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}
int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
			      BTRFS_INODE_PREALLOC);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}
int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = root->orphan_block_rsv;
	ret = btrfs_block_rsv_check(root, root->orphan_block_rsv, 0, 5, 0);
	if (ret)
		return ret;

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);

	trans->block_rsv = rsv;
	if (ret) {
		WARN_ON(1);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	return ret;
}
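/*
 * Kick off readahead across the whole cache file so that the
 * page-by-page parsing in __load_free_space_cache() mostly finds pages
 * that are already in flight instead of issuing synchronous reads.
 */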
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
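/*
 * On-disk layout as parsed below: page 0 begins with a u64 crc slot
 * followed by a u64 generation, then a packed array of
 * btrfs_free_space_entry records.  Entries never straddle a page
 * boundary, and each bitmap entry refers to one full page of bitmap
 * data stored after all of the entry pages.
 */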
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct page *page;
	struct btrfs_key key;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	pgoff_t index = 0;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		goto out;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	else if (ret > 0) {
		btrfs_release_path(path);
		ret = 0;
		goto out;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		goto out;
	}

	if (!num_entries)
		goto out;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_free_space_entry *entry;
		struct btrfs_free_space *e;
		void *addr;
		unsigned long offset = 0;
		int need_loop = 0;

		if (!num_entries && !num_bitmaps)
			break;

		page = find_or_create_page(inode->i_mapping, index, mask);
		if (!page)
			goto free_cache;

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				goto free_cache;
			}
		}
		addr = kmap(page);

		if (index == 0) {
			u64 *gen;

			/*
			 * We put a bogus crc in the front of the first page in
			 * case old kernels try to mount a fs with the new
			 * format to make sure they discard the cache.
			 */
			addr += sizeof(u64);
			offset += sizeof(u64);

			gen = addr;
			if (*gen != BTRFS_I(inode)->generation) {
				printk_ratelimited(KERN_ERR "btrfs: space cache"
					" generation (%llu) does not match "
					"inode (%llu)\n",
					(unsigned long long)*gen,
					(unsigned long long)
					BTRFS_I(inode)->generation);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}
			addr += sizeof(u64);
			offset += sizeof(u64);
		}
		entry = addr;

		while (1) {
			if (!num_entries)
				break;

			need_loop = 1;
			e = kmem_cache_zalloc(btrfs_free_space_cachep,
					      GFP_NOFS);
			if (!e) {
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			e->offset = le64_to_cpu(entry->offset);
			e->bytes = le64_to_cpu(entry->bytes);
			if (!e->bytes) {
				kunmap(page);
				kmem_cache_free(btrfs_free_space_cachep, e);
				unlock_page(page);
				page_cache_release(page);
				goto free_cache;
			}

			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				spin_unlock(&ctl->tree_lock);
				if (ret) {
					printk(KERN_ERR "Duplicate entries in "
					       "free space cache, dumping\n");
					kunmap(page);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
			} else {
				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
				if (!e->bitmap) {
					kunmap(page);
					kmem_cache_free(
						btrfs_free_space_cachep, e);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				spin_lock(&ctl->tree_lock);
				ret = link_free_space(ctl, e);
				ctl->total_bitmaps++;
				ctl->op->recalc_thresholds(ctl);
				spin_unlock(&ctl->tree_lock);
				if (ret) {
					printk(KERN_ERR "Duplicate entries in "
					       "free space cache, dumping\n");
					kunmap(page);
					unlock_page(page);
					page_cache_release(page);
					goto free_cache;
				}
				list_add_tail(&e->list, &bitmaps);
			}

			num_entries--;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				break;
			entry++;
		}

		/*
		 * We read an entry out of this page, we need to move on to the
		 * next page.
		 */
		if (need_loop) {
			kunmap(page);
			goto next;
		}

		/*
		 * We add the bitmaps at the end of the entries in order that
		 * the bitmap entries are added to the cache.
		 */
		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
		list_del_init(&e->list);
		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
		kunmap(page);
		num_bitmaps--;
next:
		unlock_page(page);
		page_cache_release(page);
		index++;
	}

	ret = 1;
out:
	return ret;
free_cache:
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}
int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has a wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}
/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct page **pages;
	struct page *page;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, end, len;
	u64 bytes = 0;
	u32 crc = ~(u32)0;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int index = 0, num_pages = 0;
	int entries = 0;
	int bitmaps = 0;
	int ret = -1;
	bool next_page = false;
	bool out_of_space = false;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return 0;

	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	filemap_write_and_wait(inode->i_mapping);
	btrfs_wait_ordered_range(inode, inode->i_size &
				 ~(root->sectorsize - 1), (u64)-1);

	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
	if (!pages)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	/*
	 * Lock all pages first so we can lock the extent safely.
	 *
	 * NOTE: Because we hold the ref the entire time we're going to write to
	 * the page find_get_page should never fail, so we don't do a check
	 * after find_get_page at this point.  Just putting this here so people
	 * know and don't freak out.
	 */
	while (index < num_pages) {
		page = find_or_create_page(inode->i_mapping, index, mask);
		if (!page) {
			int i;

			for (i = 0; i < index; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			goto out;
		}
		pages[index] = page;
		index++;
	}

	index = 0;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state, GFP_NOFS);

	/*
	 * When searching for pinned extents, we need to start at our start
	 * offset.
	 */
	if (block_group)
		start = block_group->key.objectid;

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	do {
		struct btrfs_free_space_entry *entry;
		void *addr, *orig;
		unsigned long offset = 0;

		next_page = false;

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}

		page = pages[index];

		orig = addr = kmap(page);
		if (index == 0) {
			u64 *gen;

			/*
			 * We're going to put in a bogus crc for this page to
			 * make sure that old kernels who aren't aware of this
			 * format will be sure to discard the cache.
			 */
			addr += sizeof(u64);
			offset += sizeof(u64);

			gen = addr;
			*gen = trans->transid;
			addr += sizeof(u64);
			offset += sizeof(u64);
		}
		entry = addr;

		memset(addr, 0, PAGE_CACHE_SIZE - offset);
		while (node && !next_page) {
			struct btrfs_free_space *e;

			e = rb_entry(node, struct btrfs_free_space, offset_index);
			entries++;

			entry->offset = cpu_to_le64(e->offset);
			entry->bytes = cpu_to_le64(e->bytes);
			if (e->bitmap) {
				entry->type = BTRFS_FREE_SPACE_BITMAP;
				list_add_tail(&e->list, &bitmap_list);
				bitmaps++;
			} else {
				entry->type = BTRFS_FREE_SPACE_EXTENT;
			}
			node = rb_next(node);
			if (!node && cluster) {
				node = rb_first(&cluster->root);
				cluster = NULL;
			}
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/*
		 * We want to add any pinned extents to our free space cache
		 * so we don't leak the space
		 */
		while (block_group && !next_page &&
		       (start < block_group->key.objectid +
			block_group->key.offset)) {
			ret = find_first_extent_bit(unpin, start, &start, &end,
						    EXTENT_DIRTY);
			if (ret) {
				ret = 0;
				break;
			}

			/* This pinned extent is out of our range */
			if (start >= block_group->key.objectid +
			    block_group->key.offset)
				break;

			len = block_group->key.objectid +
				block_group->key.offset - start;
			len = min(len, end + 1 - start);

			entries++;
			entry->offset = cpu_to_le64(start);
			entry->bytes = cpu_to_le64(len);
			entry->type = BTRFS_FREE_SPACE_EXTENT;

			start = end + 1;
			offset += sizeof(struct btrfs_free_space_entry);
			if (offset + sizeof(struct btrfs_free_space_entry) >=
			    PAGE_CACHE_SIZE)
				next_page = true;
			entry++;
		}

		/* Generate bogus crc value */
		if (index == 0) {
			u32 *tmp;

			crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
					      PAGE_CACHE_SIZE - sizeof(u64));
			btrfs_csum_final(crc, (char *)&crc);
			tmp = orig;
			*tmp = crc;
		}

		kunmap(page);

		bytes += PAGE_CACHE_SIZE;

		index++;
	} while (node || next_page);

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		void *addr;
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		if (index >= num_pages) {
			out_of_space = true;
			break;
		}
		page = pages[index];

		addr = kmap(page);
		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;

		list_del_init(&entry->list);
		index++;
	}

	if (out_of_space) {
		btrfs_drop_pages(pages, num_pages);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
				     i_size_read(inode) - 1, &cached_state,
				     GFP_NOFS);
		ret = -1;
		goto out;
	}

	/* Zero out the rest of the pages just to make sure */
	while (index < num_pages) {
		void *addr;

		page = pages[index];
		addr = kmap(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		bytes += PAGE_CACHE_SIZE;
		index++;
	}

	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
				bytes, &cached_state);
	btrfs_drop_pages(pages, num_pages);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret) {
		ret = -1;
		goto out;
	}

	BTRFS_I(inode)->generation = trans->transid;

	filemap_write_and_wait(inode->i_mapping);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		ret = -1;
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			ret = -1;
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
					 GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	ret = 0;
out:
	kfree(pages);
	if (ret) {
		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return ret;
}
int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}
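/*
 * Bitmap index helpers.  A rough worked example, assuming a 4K
 * sectorsize (unit) and a bitmap whose bitmap_start is 1GB: an offset of
 * 1GB + 40K maps to bit (40K / 4K) = 10, and a 12K range covers
 * bytes_to_bits(12K, 4K) = 3 bits.
 */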
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}
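/*
 * Example of the math below, assuming a 1GB block group with 4K sectors:
 * max_bytes is 32K, one page (4K) is set aside per existing bitmap plus
 * one spare, and whatever remains (capped at 16K, half of max_bytes)
 * divided by sizeof(struct btrfs_free_space) becomes the extent entry
 * threshold.
 */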
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}
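/*
 * search_bitmap() is a first-fit scan: *offset and *bytes are in/out
 * parameters.  On success it rewrites them with the start and length of
 * the first run of set bits that is at least *bytes long at or after
 * *offset, and returns 0; it returns -1 if no such run exists.
 */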
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}
static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case something's not
	 * working.  Search the bitmap for the space we are trying to use to
	 * make sure it's actually there.  If it's not there then we need to
	 * stop because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};
static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}
no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	}
	goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
			  struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}
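/*
 * Adding free space is a three step affair: try to merge the new range
 * with its extent neighbours, failing that try to drop it into an
 * existing (or new) bitmap via insert_into_bitmap(), and only then link
 * it as a standalone extent entry.
 */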
int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			WARN_ON(1);
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
					     struct btrfs_free_space,
					     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		link_free_space(ctl, info);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret);
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret);
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes\n",
	       count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache.  If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already.  In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}
void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}
void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache.  If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      min_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}
static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long search_bits;
	unsigned long total_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= search_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < total_bits) {
		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
		if (i - start > total_bits * 2) {
			total_found = 0;
			cluster->max_size = 0;
			found = false;
		}
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret);

	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *prev = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 max_gap = 128 * 1024;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap) {
		if (list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;
	prev = entry;

	while (window_free <= min_bytes) {
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		/*
		 * we haven't filled the empty size and the window is
		 * very large.  reset and try again
		 */
		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
		    entry->offset - window_start > (min_bytes * 2)) {
			first = entry;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = entry->bytes;
		} else {
			last = entry;
			window_free += entry->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
		prev = entry;
	}

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		int ret;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		BUG_ON(ret);
	} while (node && entry != last);

	cluster->max_size = max_extent;

	return 0;
}
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = -ENOSPC;

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * First check our cached list of bitmaps and see if there is an entry
	 * here that will work.
	 */
	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * If we do have entries on our list and we are here then we didn't find
	 * anything, so go ahead and get the next entry after the last entry in
	 * this list and start the search from there.
	 */
	if (!list_empty(bitmaps)) {
		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
				   list);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		goto search;
	}

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
	if (!entry)
		return -ENOSPC;

search:
	node = &entry->offset_index;
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (!entry->bitmap)
			continue;
		if (entry->bytes < min_bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, min_bytes);
	} while (ret && node);

	return ret;
}
/*
 * here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct list_head bitmaps;
	struct btrfs_free_space *entry, *tmp;
	u64 min_bytes;
	int ret;

	/* for metadata, allow allocates with more holes */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < min_bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
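/*
 * Walk the free space in [start, end) and issue discards for every
 * chunk of at least minlen bytes.  Each chunk is temporarily removed
 * from the free space tree while the discard is in flight and re-added
 * afterwards, so allocators can't hand it out mid-discard.
 */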
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 bytes = 0;
	u64 actually_trimmed;
	int ret = 0;

	*trimmed = 0;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			entry = tree_search_offset(ctl,
						   offset_to_bitmap(ctl, start),
						   1, 1);

		if (!entry || entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, &start, &bytes);
			if (!ret) {
				if (start >= end) {
					spin_unlock(&ctl->tree_lock);
					break;
				}
				bytes = min(bytes, end - start);
				bitmap_clear_bits(ctl, entry, start, bytes);
				if (entry->bytes == 0)
					free_bitmap(ctl, entry);
			} else {
				start = entry->offset + BITS_PER_BITMAP *
					block_group->sectorsize;
				spin_unlock(&ctl->tree_lock);
				ret = 0;
				continue;
			}
		} else {
			start = entry->offset;
			bytes = min(entry->bytes, end - start);
			unlink_free_space(ctl, entry);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);

		if (bytes >= minlen) {
			struct btrfs_space_info *space_info;
			int update = 0;

			space_info = block_group->space_info;
			spin_lock(&space_info->lock);
			spin_lock(&block_group->lock);
			if (!block_group->ro) {
				block_group->reserved += bytes;
				space_info->bytes_reserved += bytes;
				update = 1;
			}
			spin_unlock(&block_group->lock);
			spin_unlock(&space_info->lock);

			ret = btrfs_error_discard_extent(fs_info->extent_root,
							 start,
							 bytes,
							 &actually_trimmed);

			btrfs_add_free_space(block_group, start, bytes);
			if (update) {
				spin_lock(&space_info->lock);
				spin_lock(&block_group->lock);
				if (block_group->ro)
					space_info->bytes_readonly += bytes;
				block_group->reserved -= bytes;
				space_info->bytes_reserved -= bytes;
				spin_unlock(&space_info->lock);
				spin_unlock(&block_group->lock);
			}

			if (ret)
				break;
			*trimmed += actually_trimmed;
		}
		start += bytes;
		bytes = 0;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
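/*
 * The free-ino cache reuses the free space cache machinery: the ctl
 * tracks free inode numbers instead of disk ranges, and the cache inode
 * lives at offset 0 of the root, hence the __lookup_free_space_inode()
 * call with offset 0 below.
 */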
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
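/*
 * Loading the free-ino cache mirrors load_free_space_cache(): bail out
 * while unmounting, validate the cached inode's generation against the
 * root generation, then hand off to __load_free_space_cache() at
 * offset 0.
 */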
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on the
	 * normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
#endif
	}

	iput(inode);
	return ret;
}