/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
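/*
 * Each free extent is tracked by a single struct btrfs_free_space that is
 * linked into two rbtrees at once: one sorted by offset (used for merging
 * with neighbours and for offset-hinted lookups) and one sorted by size
 * (used for best-fit allocation).
 */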
struct btrfs_free_space {
	struct rb_node bytes_index;
	struct rb_node offset_index;
	u64 offset;
	u64 bytes;
};
static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset)
			p = &(*p)->rb_left;
		else if (offset > info->offset)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
static int tree_insert_bytes(struct rb_root *root, u64 bytes,
			     struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, bytes_index);

		if (bytes < info->bytes)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
/*
 * searches the tree for the given offset.
 *
 * fuzzy == 1: this is used for allocations where we are given a hint of where
 * to look for free space. Because the hint may not be completely on an offset
 * mark, or the hint may no longer point to free space, we need to fudge our
 * results a bit. So we look for free space starting at or after offset with at
 * least bytes size. We prefer to find as close to the given offset as we can.
 * Also, if the offset is within a free space range, then we will return the
 * free space that contains the given offset, which means we can return a free
 * space chunk with an offset before the provided offset.
 *
 * fuzzy == 0: this is just a normal tree search. Give us the free space that
 * starts at the given offset which is at least bytes size, and if it's not
 * there, return NULL.
 */
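/*
 * Illustration (made-up values): with a single cached extent covering
 * [1M, 1M+256K), a fuzzy search for offset = 1M+64K, bytes = 4K returns that
 * entry even though it starts before the requested offset, because the offset
 * falls inside it. A non-fuzzy search with the same arguments returns NULL,
 * since no entry starts at exactly 1M+64K.
 */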
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
						   u64 offset, u64 bytes,
						   int fuzzy)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);

		if (offset < entry->offset) {
			if (fuzzy &&
			    (!ret || entry->offset < ret->offset) &&
			    (bytes <= entry->bytes))
				ret = entry;
			n = n->rb_left;
		} else if (offset > entry->offset) {
			if (fuzzy &&
			    (entry->offset + entry->bytes - 1) >= offset &&
			    bytes <= entry->bytes) {
				ret = entry;
				break;
			}
			n = n->rb_right;
		} else {
			if (bytes > entry->bytes) {
				n = n->rb_right;
				continue;
			}
			ret = entry;
			break;
		}
	}

	return ret;
}
/*
 * return a chunk at least bytes size, as close to offset as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
						  u64 offset, u64 bytes)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_free_space *entry, *ret = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, bytes_index);

		if (bytes < entry->bytes) {
			/*
			 * We prefer to get a hole size as close to the size we
			 * are asking for so we don't take small slivers out of
			 * huge holes, but we also want to get as close to the
			 * offset as possible so we don't have a whole lot of
			 * fragmentation.
			 */
			if (offset <= entry->offset) {
				if (!ret)
					ret = entry;
				else if (entry->bytes < ret->bytes)
					ret = entry;
				else if (entry->offset < ret->offset)
					ret = entry;
			}
			n = n->rb_left;
		} else if (bytes > entry->bytes) {
			n = n->rb_right;
		} else {
			/*
			 * Ok, we may have multiple chunks of the wanted size,
			 * so we don't want to take the first one we find; we
			 * want to take the one closest to our given offset, so
			 * keep searching just in case there's a better match.
			 */
			n = n->rb_right;
			if (offset > entry->offset)
				continue;
			else if (!ret || entry->offset < ret->offset)
				ret = entry;
		}
	}

	return ret;
}
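/*
 * Drop an entry from both the offset-sorted and the size-sorted tree; the
 * caller either frees the entry or re-links it after adjusting its
 * offset/bytes.
 */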
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &block_group->free_space_offset);
	rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}
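/*
 * Inverse helper: insert an entry into both trees. A zero-size entry would
 * be a caller bug, hence the BUG_ON().
 */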
static int link_free_space(struct btrfs_block_group_cache *block_group,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bytes);
	ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
				 &info->offset_index);
	if (ret)
		return ret;

	ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
				&info->bytes_index);
	return ret;
}
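/*
 * btrfs_add_free_space() coalesces the new range with any entry that ends
 * exactly at 'offset' (left neighbour) or starts exactly at offset + bytes
 * (right neighbour). Illustration (made-up values): freeing [8K, 12K) while
 * [4K, 8K) and [12K, 16K) are already cached collapses all three into a
 * single [4K, 16K) entry.
 */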
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
			 u64 offset, u64 bytes)
{
	struct btrfs_free_space *right_info;
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *info = NULL;
	int ret = 0;

	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&block_group->tree_lock);

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding; if there is, remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(&block_group->free_space_offset,
					offset + bytes, 0, 0);
	left_info = tree_search_offset(&block_group->free_space_offset,
				       offset - 1, 0, 1);

	if (right_info) {
		unlink_free_space(block_group, right_info);
		info->bytes += right_info->bytes;
		kfree(right_info);
	}

	if (left_info && left_info->offset + left_info->bytes == offset) {
		unlink_free_space(block_group, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kfree(left_info);
	}

	ret = link_free_space(block_group, info);
	if (ret)
		kfree(info);

	spin_unlock(&block_group->tree_lock);

	if (ret)
		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);

	return ret;
}
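/*
 * btrfs_remove_free_space() handles three cases: the removed range starts
 * exactly at a cached entry (trim or delete that entry), the range sits in
 * the middle of an entry (punch a hole by re-linking the tail and re-adding
 * the head), or no matching entry is found (dump state and warn).
 */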
int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&block_group->tree_lock);

	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
				  1);
	if (info && info->offset == offset) {
		if (info->bytes < bytes) {
			printk(KERN_ERR "Found free space at %llu, size %llu, "
			       "trying to use %llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			spin_unlock(&block_group->tree_lock);
			goto out;
		}
		unlink_free_space(block_group, info);

		if (info->bytes == bytes) {
			kfree(info);
			spin_unlock(&block_group->tree_lock);
			goto out;
		}

		info->offset += bytes;
		info->bytes -= bytes;

		ret = link_free_space(block_group, info);
		spin_unlock(&block_group->tree_lock);
		BUG_ON(ret);
	} else if (info && info->offset < offset &&
		   info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(block_group, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(block_group, info);
			BUG_ON(ret);
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kfree(info);
		}
		spin_unlock(&block_group->tree_lock);
		/* step two, insert a new info struct to cover anything
		 * before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		BUG_ON(ret);
	} else {
		spin_unlock(&block_group->tree_lock);
		if (!info) {
			printk(KERN_ERR "couldn't find space %llu to free\n",
			       (unsigned long long)offset);
			printk(KERN_ERR "cached is %d, offset %llu bytes %llu\n",
			       block_group->cached,
			       (unsigned long long)block_group->key.objectid,
			       (unsigned long long)block_group->key.offset);
			btrfs_dump_free_space(block_group, bytes);
		} else {
			printk(KERN_ERR "hmm, found offset=%llu bytes=%llu, "
			       "but wanted offset=%llu bytes=%llu\n",
			       (unsigned long long)info->offset,
			       (unsigned long long)info->bytes,
			       (unsigned long long)offset,
			       (unsigned long long)bytes);
		}
		WARN_ON(1);
	}
out:
	return ret;
}
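/*
 * Debug helper: walk the offset-sorted tree, print every cached extent and
 * count how many of them are at least 'bytes' long.
 */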
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_ERR "entry offset %llu, bytes %llu\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes);
	}
	printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
	       count, (unsigned long long)bytes);
}
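/*
 * Sum of all cached free space in the block group, in bytes.
 */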
u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 ret = 0;

	for (n = rb_first(&block_group->free_space_offset); n;
	     n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		ret += info->bytes;
	}

	return ret;
}
/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything.
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->window_start = 0;
	node = rb_first(&cluster->root);
	while (node) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);
		link_free_space(block_group, entry);
	}
	list_del_init(&cluster->block_group_list);

	btrfs_put_block_group(cluster->block_group);
	cluster->block_group = NULL;
	cluster->root.rb_node = NULL;
out:
	spin_unlock(&cluster->lock);
	return 0;
}
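/*
 * Tear down the whole cache for a block group: first give back any extents
 * still held by clusters, then free every remaining entry, briefly dropping
 * tree_lock when the scheduler wants the CPU back.
 */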
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space *info;
	struct rb_node *node;
	struct btrfs_free_cluster *cluster;
	struct btrfs_free_cluster *safe;

	spin_lock(&block_group->tree_lock);

	list_for_each_entry_safe(cluster, safe, &block_group->cluster_list,
				 block_group_list) {
		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
	}

	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, bytes_index);
		unlink_free_space(block_group, info);
		kfree(info);
		if (need_resched()) {
			spin_unlock(&block_group->tree_lock);
			cond_resched();
			spin_lock(&block_group->tree_lock);
		}
	}

	spin_unlock(&block_group->tree_lock);
}
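/*
 * Allocate 'bytes' from the cache: first try the fuzzy offset lookup so we
 * stay close to the caller's hint, then fall back to a best-fit search by
 * size. Returns the start of the allocation, or 0 if nothing big enough was
 * found.
 */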
u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	u64 ret = 0;

	spin_lock(&block_group->tree_lock);
	entry = tree_search_offset(&block_group->free_space_offset, offset,
				   bytes + empty_size, 1);
	if (!entry)
		entry = tree_search_bytes(&block_group->free_space_bytes,
					  offset, bytes + empty_size);
	if (entry) {
		unlink_free_space(block_group, entry);
		ret = entry->offset;
		entry->offset += bytes;
		entry->bytes -= bytes;

		if (!entry->bytes)
			kfree(entry);
		else
			link_free_space(block_group, entry);
	}
	spin_unlock(&block_group->tree_lock);

	return ret;
}
/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it, don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	/* now return any extents the cluster had on it */
	spin_lock(&block_group->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&block_group->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}
/*
 * given a cluster, try to allocate 'bytes' from it; returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out.
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes || entry->offset < min_start) {
			struct rb_node *node;

			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		ret = entry->offset;
		entry->offset += bytes;
		entry->bytes -= bytes;

		if (entry->bytes == 0) {
			rb_erase(&entry->offset_index, &cluster->root);
			kfree(entry);
		}
		break;
	}
out:
	spin_unlock(&cluster->lock);

	return ret;
}
/*
 * here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes free and up to empty_size + bytes free.
 * We might not find them all in one contiguous area.
 *
 * returns zero and sets up cluster if things worked out, otherwise
 * it returns -enospc.
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	struct btrfs_free_space *next;
	struct btrfs_free_space *last;
	u64 min_bytes;
	u64 window_start;
	u64 window_free;
	u64 max_extent = 0;
	int total_retries = 0;
	int ret;

	/* for metadata, allow allocations with more holes */
	if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * we want to do larger allocations when we are
		 * flushing out the delayed refs, it helps prevent
		 * making more work as we go along.
		 */
		if (trans->transaction->delayed_refs.flushing)
			min_bytes = max(bytes, (bytes + empty_size) >> 1);
		else
			min_bytes = max(bytes, (bytes + empty_size) >> 4);
	} else
		min_bytes = max(bytes, (bytes + empty_size) >> 2);
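	/*
	 * Illustration (made-up numbers): with bytes = 256K and
	 * empty_size = 768K, min_bytes starts at max(256K, 512K) = 512K for
	 * metadata while delayed refs are flushing, max(256K, 64K) = 256K for
	 * metadata otherwise, and max(256K, 256K) = 256K for data.
	 */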
	spin_lock(&block_group->tree_lock);
	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}
again:
	min_bytes = min(min_bytes, bytes + empty_size);
	entry = tree_search_bytes(&block_group->free_space_bytes,
				  offset, min_bytes);
	if (!entry) {
		ret = -ENOSPC;
		goto out;
	}
	window_start = entry->offset;
	window_free = entry->bytes;
	last = entry;
	max_extent = entry->bytes;

	while (1) {
		/* our window is just right, let's fill it */
		if (window_free >= bytes + empty_size)
			break;

		node = rb_next(&last->offset_index);
		if (!node) {
			ret = -ENOSPC;
			goto out;
		}
		next = rb_entry(node, struct btrfs_free_space, offset_index);

		/*
		 * we haven't filled the empty size and the window is
		 * very large. reset and try again
		 */
		if (next->offset - window_start > (bytes + empty_size) * 2) {
			entry = next;
			window_start = entry->offset;
			window_free = entry->bytes;
			last = entry;
			max_extent = 0;
			total_retries++;
			if (total_retries % 256 == 0) {
				if (min_bytes >= (bytes + empty_size)) {
					ret = -ENOSPC;
					goto out;
				}
				/*
				 * grow our allocation a bit, we're not having
				 * much luck
				 */
				min_bytes *= 2;
				goto again;
			}
		} else {
			last = next;
			window_free += next->bytes;
			if (entry->bytes > max_extent)
				max_extent = entry->bytes;
		}
	}

	cluster->window_start = entry->offset;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 *
	 * The cluster includes an rbtree, but only uses the offset index
	 * of each free space cache entry.
	 */
	while (1) {
		node = rb_next(&entry->offset_index);
		unlink_free_space(block_group, entry);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index);
		BUG_ON(ret);

		if (!node || entry == last)
			break;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	ret = 0;
	cluster->max_size = max_extent;
	atomic_inc(&block_group->count);
	list_add_tail(&cluster->block_group_list, &block_group->cluster_list);
	cluster->block_group = block_group;
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&block_group->tree_lock);

	return ret;
}
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root.rb_node = NULL;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}