fix leak in btrfs_drop_snapshot
[linux-2.6-block.git] fs/btrfs/extent-tree.c
#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"

static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
                            u64 search_start, u64 search_end, struct key *ins);
static int finish_current_insert(struct ctree_root *extent_root);
static int run_pending(struct ctree_root *extent_root);

/*
 * pending extents are blocks that we're trying to allocate in the extent
 * map while trying to grow the map because of other allocations. To avoid
 * recursing, they are tagged in the radix tree and cleaned up after
 * other allocations are done. The pending tag is also used in the same
 * manner for deletes.
 */
#define CTREE_EXTENT_PENDING_DEL 0

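/*
 * look up the extent item for 'blocknr' in the extent tree and bump its
 * reference count.  find_free_extent() is called first so the extent
 * tree has room before we cow it, and finish_current_insert()/
 * run_pending() clean up any allocations or deletes that were deferred
 * while the item was being modified.
 */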
static int inc_block_ref(struct ctree_root *root, u64 blocknr)
{
        struct ctree_path path;
        int ret;
        struct key key;
        struct leaf *l;
        struct extent_item *item;
        struct key ins;

        find_free_extent(root->extent_root, 0, 0, (u64)-1, &ins);
        init_path(&path);
        key.objectid = blocknr;
        key.flags = 0;
        key.offset = 1;
        ret = search_slot(root->extent_root, &key, &path, 0, 1);
        if (ret != 0)
                BUG();
        l = &path.nodes[0]->leaf;
        item = (struct extent_item *)(l->data +
                                      l->items[path.slots[0]].offset);
        item->refs++;

        BUG_ON(list_empty(&path.nodes[0]->dirty));
        release_path(root->extent_root, &path);
        finish_current_insert(root->extent_root);
        run_pending(root->extent_root);
        return 0;
}

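/*
 * read the reference count stored in the extent item for 'blocknr' and
 * return it through 'refs'.
 */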
static int lookup_block_ref(struct ctree_root *root, u64 blocknr, u32 *refs)
{
        struct ctree_path path;
        int ret;
        struct key key;
        struct leaf *l;
        struct extent_item *item;

        init_path(&path);
        key.objectid = blocknr;
        key.flags = 0;
        key.offset = 1;
        ret = search_slot(root->extent_root, &key, &path, 0, 0);
        if (ret != 0)
                BUG();
        l = &path.nodes[0]->leaf;
        item = (struct extent_item *)(l->data +
                                      l->items[path.slots[0]].offset);
        *refs = item->refs;
        release_path(root->extent_root, &path);
        return 0;
}

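/*
 * increment the reference count on every extent pointed to by the node
 * in 'buf'.  Leaves carry no block pointers, and blocks owned by the
 * extent tree itself are skipped.
 */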
int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
{
        u64 blocknr;
        int i;

        if (root == root->extent_root)
                return 0;
        if (is_leaf(buf->node.header.flags))
                return 0;

        for (i = 0; i < buf->node.header.nritems; i++) {
                blocknr = buf->node.blockptrs[i];
                inc_block_ref(root, blocknr);
        }
        return 0;
}

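/*
 * once the transaction is done, empty the pinned radix.  Blocks freed
 * from the extent tree are pinned there so they are not reused while the
 * previous version of the tree still references them; after the commit
 * they become available again, and the last_insert hint is reset.
 */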
int btrfs_finish_extent_commit(struct ctree_root *root)
{
        struct ctree_root *extent_root = root->extent_root;
        unsigned long gang[8];
        int ret;
        int i;

        while(1) {
                ret = radix_tree_gang_lookup(&extent_root->pinned_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++) {
                        radix_tree_delete(&extent_root->pinned_radix, gang[i]);
                }
        }
        extent_root->last_insert.objectid = 0;
        extent_root->last_insert.offset = 0;
        return 0;
}

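/*
 * insert extent items for the blocks that find_free_extent reserved for
 * the extent tree itself (recorded in current_insert).  current_insert.flags
 * counts how many of those reserved blocks alloc_extent actually handed out.
 */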
static int finish_current_insert(struct ctree_root *extent_root)
{
        struct key ins;
        struct extent_item extent_item;
        int i;
        int ret;

        extent_item.refs = 1;
        extent_item.owner = extent_root->node->node.header.parentid;
        ins.offset = 1;
        ins.flags = 0;

        for (i = 0; i < extent_root->current_insert.flags; i++) {
                ins.objectid = extent_root->current_insert.objectid + i;
                ret = insert_item(extent_root, &ins, &extent_item,
                                  sizeof(extent_item));
                BUG_ON(ret);
        }
        extent_root->current_insert.offset = 0;
        return 0;
}

/*
 * drop one reference on an extent.  When the count reaches zero the
 * extent item is deleted, and blocks that belong to the extent tree
 * itself are pinned until the transaction commits.  Returns 0 on success.
 */
int __free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
        struct ctree_path path;
        struct key key;
        struct ctree_root *extent_root = root->extent_root;
        int ret;
        struct item *item;
        struct extent_item *ei;
        struct key ins;

        key.objectid = blocknr;
        key.flags = 0;
        key.offset = num_blocks;

        find_free_extent(root, 0, 0, (u64)-1, &ins);
        init_path(&path);
        ret = search_slot(extent_root, &key, &path, -1, 1);
        if (ret) {
                printf("failed to find %Lu\n", key.objectid);
                print_tree(extent_root, extent_root->node);
                printf("failed to find %Lu\n", key.objectid);
                BUG();
        }
        item = path.nodes[0]->leaf.items + path.slots[0];
        ei = (struct extent_item *)(path.nodes[0]->leaf.data + item->offset);
        BUG_ON(ei->refs == 0);
        ei->refs--;
        if (ei->refs == 0) {
                if (root == extent_root) {
                        int err;
                        radix_tree_preload(GFP_KERNEL);
                        err = radix_tree_insert(&extent_root->pinned_radix,
                                                blocknr, (void *)blocknr);
                        BUG_ON(err);
                        radix_tree_preload_end();
                }
                ret = del_item(extent_root, &path);
                if (root != extent_root &&
                    extent_root->last_insert.objectid < blocknr)
                        extent_root->last_insert.objectid = blocknr;
                if (ret)
                        BUG();
        }
        release_path(extent_root, &path);
        finish_current_insert(extent_root);
        return ret;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct ctree_root *extent_root)
{
        int ret;
        int count;
        struct tree_buffer *gang[4];
        int i;

        while(1) {
                count = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
                                                   (void **)gang, 0,
                                                   ARRAY_SIZE(gang),
                                                   CTREE_EXTENT_PENDING_DEL);
                if (!count)
                        break;
                for (i = 0; i < count; i++) {
                        ret = __free_extent(extent_root, gang[i]->blocknr, 1);
                        BUG_ON(ret);
                        radix_tree_tag_clear(&extent_root->cache_radix,
                                             gang[i]->blocknr,
                                             CTREE_EXTENT_PENDING_DEL);
                        tree_block_release(extent_root, gang[i]);
                }
        }
        return 0;
}

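/*
 * keep processing the pending-delete tag until no tagged blocks remain.
 * Deleting an extent can itself tag more extent tree blocks for deletion,
 * which is why this loops rather than calling del_pending_extents once.
 */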
static int run_pending(struct ctree_root *extent_root)
{
        while(radix_tree_tagged(&extent_root->cache_radix,
                                CTREE_EXTENT_PENDING_DEL))
                del_pending_extents(extent_root);
        return 0;
}

/*
 * remove an extent from the root, returns 0 on success.  Blocks that
 * belong to the extent tree itself are not freed immediately; they are
 * tagged CTREE_EXTENT_PENDING_DEL and handled later to avoid recursing
 * into the allocator.
 */
int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
        struct ctree_root *extent_root = root->extent_root;
        struct tree_buffer *t;
        int pending_ret;
        int ret;

        if (root == extent_root) {
                /* hold a reference on the block; del_pending_extents
                 * releases it once the pending delete is processed
                 */
                t = find_tree_block(root, blocknr);
                radix_tree_tag_set(&root->cache_radix, blocknr,
                                   CTREE_EXTENT_PENDING_DEL);
                return 0;
        }
        ret = __free_extent(root, blocknr, num_blocks);
        pending_ret = run_pending(root->extent_root);
        return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags == 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
                            u64 search_start, u64 search_end, struct key *ins)
{
        struct ctree_path path;
        struct key *key;
        int ret;
        u64 hole_size = 0;
        int slot = 0;
        u64 last_block = 0;
        u64 test_block;
        int start_found;
        struct leaf *l;
        struct ctree_root *root = orig_root->extent_root;
        int total_needed = num_blocks;

        /* reserve extra blocks so the extent tree itself has room to grow */
        total_needed += (node_level(root->node->node.header.flags) + 1) * 3;
        if (root->last_insert.objectid > search_start)
                search_start = root->last_insert.objectid;
check_failed:
        init_path(&path);
        ins->objectid = search_start;
        ins->offset = 0;
        ins->flags = 0;
        start_found = 0;
        ret = search_slot(root, ins, &path, 0, 0);
        if (ret < 0)
                goto error;

        if (path.slots[0] > 0)
                path.slots[0]--;

        while (1) {
                l = &path.nodes[0]->leaf;
                slot = path.slots[0];
                if (slot >= l->header.nritems) {
                        ret = next_leaf(root, &path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
                        if (!start_found) {
                                ins->objectid = search_start;
                                ins->offset = (u64)-1;
                                start_found = 1;
                                goto check_pending;
                        }
                        ins->objectid = last_block > search_start ?
                                        last_block : search_start;
                        ins->offset = (u64)-1;
                        goto check_pending;
                }
                key = &l->items[slot].key;
                if (key->objectid >= search_start) {
                        if (start_found) {
                                if (last_block < search_start)
                                        last_block = search_start;
                                hole_size = key->objectid - last_block;
                                if (hole_size > total_needed) {
                                        ins->objectid = last_block;
                                        ins->offset = hole_size;
                                        goto check_pending;
                                }
                        }
                }
                start_found = 1;
                last_block = key->objectid + key->offset;
                path.slots[0]++;
        }
        /* FIXME -ENOSPC */
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        release_path(root, &path);
        BUG_ON(ins->objectid < search_start);
        for (test_block = ins->objectid;
             test_block < ins->objectid + total_needed; test_block++) {
                if (radix_tree_lookup(&root->pinned_radix, test_block)) {
                        search_start = test_block + 1;
                        goto check_failed;
                }
        }
        BUG_ON(root->current_insert.offset);
        root->current_insert.offset = total_needed - num_blocks;
        root->current_insert.objectid = ins->objectid + num_blocks;
        root->current_insert.flags = 0;
        root->last_insert.objectid = ins->objectid;
        ins->offset = num_blocks;
        return 0;
error:
        release_path(root, &path);
        return ret;
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * The key for the new extent is returned through ins.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
                 u64 search_end, u64 owner, struct key *ins)
{
        int ret;
        int pending_ret;
        struct ctree_root *extent_root = root->extent_root;
        struct extent_item extent_item;

        extent_item.refs = 1;
        extent_item.owner = owner;

        if (root == extent_root) {
                /* hand out one of the blocks reserved by find_free_extent;
                 * finish_current_insert adds the extent items later
                 */
                BUG_ON(extent_root->current_insert.offset == 0);
                BUG_ON(num_blocks != 1);
                BUG_ON(extent_root->current_insert.flags ==
                       extent_root->current_insert.offset);
                ins->offset = 1;
                ins->objectid = extent_root->current_insert.objectid +
                                extent_root->current_insert.flags++;
                return 0;
        }
        ret = find_free_extent(root, num_blocks, search_start,
                               search_end, ins);
        if (ret)
                return ret;

        ret = insert_item(extent_root, ins, &extent_item,
                          sizeof(extent_item));

        finish_current_insert(extent_root);
        pending_ret = run_pending(extent_root);
        if (ret)
                return ret;
        if (pending_ret)
                return pending_ret;
        return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct tree_buffer *alloc_free_block(struct ctree_root *root)
{
        struct key ins;
        int ret;
        struct tree_buffer *buf;

        ret = alloc_extent(root, 1, 0, (u64)-1,
                           root->node->node.header.parentid,
                           &ins);
        if (ret) {
                BUG();
                return NULL;
        }
        buf = find_tree_block(root, ins.objectid);
        dirty_tree_block(root, buf);
        return buf;
}

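/*
 * walk down from path->nodes[*level], dropping block references as we go.
 * Blocks shared with other trees just lose one reference and are not
 * descended into; blocks referenced only by this snapshot are freed.
 * Updates *level so walk_up_tree knows where to continue.
 */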
int walk_down_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        struct tree_buffer *next;
        struct tree_buffer *cur;
        u64 blocknr;
        int ret;
        u32 refs;

        ret = lookup_block_ref(root, path->nodes[*level]->blocknr, &refs);
        BUG_ON(ret);
        if (refs > 1)
                goto out;
        while(*level > 0) {
                cur = path->nodes[*level];
                if (path->slots[*level] >= cur->node.header.nritems)
                        break;
                blocknr = cur->node.blockptrs[path->slots[*level]];
                ret = lookup_block_ref(root, blocknr, &refs);
                BUG_ON(ret);
                if (refs != 1 || *level == 1) {
                        path->slots[*level]++;
                        ret = free_extent(root, blocknr, 1);
                        BUG_ON(ret);
                        continue;
                }
                next = read_tree_block(root, blocknr);
                if (path->nodes[*level-1])
                        tree_block_release(root, path->nodes[*level-1]);
                path->nodes[*level-1] = next;
                *level = node_level(next->node.header.flags);
                path->slots[*level] = 0;
        }
out:
        ret = free_extent(root, path->nodes[*level]->blocknr, 1);
        tree_block_release(root, path->nodes[*level]);
        path->nodes[*level] = NULL;
        *level += 1;
        BUG_ON(ret);
        return 0;
}

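/*
 * move up the tree looking for the next sibling to process, freeing each
 * node we have finished walking on the way.  Returns 0 when a new slot
 * was found for walk_down_tree, or 1 when the whole tree has been dropped.
 */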
int walk_up_tree(struct ctree_root *root, struct ctree_path *path, int *level)
{
        int i;
        int slot;
        int ret;

        for(i = *level; i < MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot < path->nodes[i]->node.header.nritems - 1) {
                        path->slots[i]++;
                        *level = i;
                        return 0;
                } else {
                        ret = free_extent(root,
                                          path->nodes[*level]->blocknr, 1);
                        tree_block_release(root, path->nodes[*level]);
                        path->nodes[*level] = NULL;
                        *level = i + 1;
                        BUG_ON(ret);
                }
        }
        return 1;
}

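/*
 * drop the reference counts on all the blocks in the snapshot rooted at
 * 'snap', freeing any that are no longer referenced.  The final loop
 * releases any tree buffers still held in the path so they are not
 * leaked.
 */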
int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
{
        int ret;
        int level;
        struct ctree_path path;
        int i;
        int orig_level;

        init_path(&path);

        level = node_level(snap->node.header.flags);
        orig_level = level;
        path.nodes[level] = snap;
        path.slots[level] = 0;
        while(1) {
                ret = walk_down_tree(root, &path, &level);
                if (ret > 0)
                        break;
                ret = walk_up_tree(root, &path, &level);
                if (ret > 0)
                        break;
        }
        for (i = 0; i <= orig_level; i++) {
                if (path.nodes[i]) {
                        tree_block_release(root, path.nodes[i]);
                }
        }

        return 0;
}

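/* earlier recursive version of btrfs_drop_snapshot, left disabled for reference */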
#if 0
int btrfs_drop_snapshot(struct ctree_root *root, struct tree_buffer *snap)
{
        int ret;
        int level;
        u32 refs;
        u64 blocknr = snap->blocknr;

        level = node_level(snap->node.header.flags);
        ret = lookup_block_ref(root, snap->blocknr, &refs);
        BUG_ON(ret);
        if (refs == 1 && level != 0) {
                struct node *n = &snap->node;
                struct tree_buffer *b;
                int i;
                for (i = 0; i < n->header.nritems; i++) {
                        b = read_tree_block(root, n->blockptrs[i]);
                        /* FIXME, don't recurse here */
                        ret = btrfs_drop_snapshot(root, b);
                        BUG_ON(ret);
                        tree_block_release(root, b);
                }
        }
        ret = free_extent(root, blocknr, 1);
        BUG_ON(ret);
        return 0;
}
#endif