Btrfs: make things static and include the right headers
[linux-2.6-block.git] / fs/btrfs/free-space-cache.c
/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include "ctree.h"

static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset)
                        p = &(*p)->rb_left;
                else if (offset > info->offset)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

static int tree_insert_bytes(struct rb_root *root, u64 bytes,
                             struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, bytes_index);

                if (bytes < info->bytes)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.  If 'contains' is set we return the
 * free space that contains the given offset.  If 'contains' is not set we
 * return the free space that starts at or after the given offset and is at
 * least 'bytes' long.
 */
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int contains)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        if (!contains &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        if ((entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}
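
/*
 * Illustrative sketch of the two lookup modes described above.  The helper
 * name and the offsets/sizes are hypothetical and the block is compiled out;
 * it only documents how callers in this file use tree_search_offset().
 */
#if 0
static void example_offset_lookups(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *entry;

        /*
         * contains=1: return the entry whose range covers the offset, e.g.
         * an entry for [4096, 12288) covers offset 8192.
         */
        entry = tree_search_offset(&block_group->free_space_offset,
                                   8192, 0, 1);

        /*
         * contains=0: return an entry starting at or after the offset that
         * is at least 'bytes' (here 4096) long.
         */
        entry = tree_search_offset(&block_group->free_space_offset,
                                   0, 4096, 0);
}
#endif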

/*
 * return a chunk of at least 'bytes' size, as close to 'offset' as we can get.
 */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer a hole as close as possible to the size
                         * we are asking for, so we don't take small slivers
                         * out of huge holes, but we also want to get as close
                         * to the offset as possible so we don't have a whole
                         * lot of fragmentation.
                         */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        n = n->rb_right;
                } else {
                        /*
                         * Ok, we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find; we
                         * want the one closest to our given offset, so keep
                         * searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}

static void unlink_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &block_group->free_space_offset);
        rb_erase(&info->bytes_index, &block_group->free_space_bytes);
}

static int link_free_space(struct btrfs_block_group_cache *block_group,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                 &info->offset_index);
        if (ret)
                return ret;

        ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
                                &info->bytes_index);
        if (ret)
                return ret;

        return ret;
}

static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                                  u64 offset, u64 bytes)
{
        struct btrfs_free_space *right_info;
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *info = NULL;
        struct btrfs_free_space *alloc_info;
        int ret = 0;

        alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
        if (!alloc_info)
                return -ENOMEM;

        /*
         * first we want to see if there is free space adjacent to the range
         * we are adding; if there is, remove that struct and add a new one to
         * cover the entire range
         */
        right_info = tree_search_offset(&block_group->free_space_offset,
                                        offset+bytes, 0, 1);
        left_info = tree_search_offset(&block_group->free_space_offset,
                                       offset-1, 0, 1);

        if (right_info && right_info->offset == offset+bytes) {
                unlink_free_space(block_group, right_info);
                info = right_info;
                info->offset = offset;
                info->bytes += bytes;
        } else if (right_info && right_info->offset != offset+bytes) {
                printk(KERN_ERR "adding space in the middle of an existing "
                       "free space area. existing: offset=%Lu, bytes=%Lu. "
                       "new: offset=%Lu, bytes=%Lu\n", right_info->offset,
                       right_info->bytes, offset, bytes);
                BUG();
        }

        if (left_info) {
                unlink_free_space(block_group, left_info);

                if (unlikely((left_info->offset + left_info->bytes) !=
                             offset)) {
                        printk(KERN_ERR "free space to the left of new free "
                               "space isn't quite right. existing: offset=%Lu,"
                               " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n",
                               left_info->offset, left_info->bytes, offset,
                               bytes);
                        BUG();
                }

                if (info) {
                        info->offset = left_info->offset;
                        info->bytes += left_info->bytes;
                        kfree(left_info);
                } else {
                        info = left_info;
                        info->bytes += bytes;
                }
        }

        if (info) {
                ret = link_free_space(block_group, info);
                if (!ret)
                        info = NULL;
                goto out;
        }

        info = alloc_info;
        alloc_info = NULL;
        info->offset = offset;
        info->bytes = bytes;

        ret = link_free_space(block_group, info);
        if (ret)
                kfree(info);
out:
        if (ret) {
                printk(KERN_ERR "btrfs: unable to add free space: %d\n", ret);
                if (ret == -EEXIST)
                        BUG();
        }

        kfree(alloc_info);

        return ret;
}
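
/*
 * Illustrative sketch: __btrfs_add_free_space() merges with adjacent entries,
 * so adding two touching ranges leaves a single entry covering both.  The
 * helper name and the offsets/sizes are hypothetical; the block is compiled
 * out.
 */
#if 0
static void example_coalesce(struct btrfs_block_group_cache *block_group)
{
        /* creates one entry covering [0, 4096) */
        __btrfs_add_free_space(block_group, 0, 4096);

        /* merges with the entry above, leaving one entry for [0, 8192) */
        __btrfs_add_free_space(block_group, 4096, 4096);
}
#endif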

static int
__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                          u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);

        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        printk(KERN_ERR "Found free space at %Lu, size %Lu, "
                               "trying to use %Lu\n",
                               info->offset, info->bytes, bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        goto out;
                }

                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        kfree(info);
                        goto out;
                }

                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;

                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }

                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = __btrfs_add_free_space(block_group, old_start,
                                             offset - old_start);
                BUG_ON(ret);
        } else {
                WARN_ON(1);
        }
out:
        return ret;
}
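
/*
 * Illustrative sketch: removing a range from the middle of an entry splits it
 * in two, which is the tree log replay case handled above.  The helper name
 * and the offsets/sizes are hypothetical; the block is compiled out.
 */
#if 0
static void example_remove_middle(struct btrfs_block_group_cache *block_group)
{
        /* one entry covering [0, 16384) */
        __btrfs_add_free_space(block_group, 0, 16384);

        /* punch a hole in the middle, leaving [0, 4096) and [8192, 16384) */
        __btrfs_remove_free_space(block_group, 4096, 4096);
}
#endif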

int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                         u64 offset, u64 bytes)
{
        int ret;
        struct btrfs_free_space *sp;

        mutex_lock(&block_group->alloc_mutex);
        ret = __btrfs_add_free_space(block_group, offset, bytes);
        sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
        BUG_ON(!sp);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}

int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
                              u64 offset, u64 bytes)
{
        int ret;
        struct btrfs_free_space *sp;

        ret = __btrfs_add_free_space(block_group, offset, bytes);
        sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
        BUG_ON(!sp);

        return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
{
        int ret = 0;

        mutex_lock(&block_group->alloc_mutex);
        ret = __btrfs_remove_free_space(block_group, offset, bytes);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}

int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
                                 u64 offset, u64 bytes)
{
        int ret;

        ret = __btrfs_remove_free_space(block_group, offset, bytes);

        return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;

        for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
                /*
                 * printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset,
                 *        info->bytes);
                 */
        }
        printk(KERN_INFO "%d blocks of free space at or bigger than %Lu "
               "bytes\n", count, bytes);
}

u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *n;
        u64 ret = 0;

        for (n = rb_first(&block_group->free_space_offset); n;
             n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                ret += info->bytes;
        }

        return ret;
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        mutex_lock(&block_group->alloc_mutex);
        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                if (need_resched()) {
                        mutex_unlock(&block_group->alloc_mutex);
                        cond_resched();
                        mutex_lock(&block_group->alloc_mutex);
                }
        }
        mutex_unlock(&block_group->alloc_mutex);
}

#if 0
static struct btrfs_free_space *btrfs_find_free_space_offset(struct
                                                      btrfs_block_group_cache
                                                      *block_group, u64 offset,
                                                      u64 bytes)
{
        struct btrfs_free_space *ret;

        mutex_lock(&block_group->alloc_mutex);
        ret = tree_search_offset(&block_group->free_space_offset, offset,
                                 bytes, 0);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}

static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
                                                      btrfs_block_group_cache
                                                      *block_group, u64 offset,
                                                      u64 bytes)
{
        struct btrfs_free_space *ret;

        mutex_lock(&block_group->alloc_mutex);

        ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}
#endif

struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
                                               *block_group, u64 offset,
                                               u64 bytes)
{
        struct btrfs_free_space *ret = NULL;

        ret = tree_search_offset(&block_group->free_space_offset, offset,
                                 bytes, 0);
        if (!ret)
                ret = tree_search_bytes(&block_group->free_space_bytes,
                                        offset, bytes);

        return ret;
}
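
/*
 * Illustrative sketch of how an allocator-side caller might combine these
 * helpers while already holding block_group->alloc_mutex.  The helper name
 * and the policy of always taking the front of the returned extent are
 * hypothetical; the block is compiled out.
 */
#if 0
static u64 example_allocate(struct btrfs_block_group_cache *block_group,
                            u64 search_start, u64 num_bytes)
{
        struct btrfs_free_space *entry;
        u64 found;

        /* look for a free extent of at least num_bytes near search_start */
        entry = btrfs_find_free_space(block_group, search_start, num_bytes);
        if (!entry)
                return 0;

        /* claim the front of the extent; alloc_mutex is already held */
        found = entry->offset;
        btrfs_remove_free_space_lock(block_group, found, num_bytes);

        return found;
}
#endif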