Btrfs: Mixed back reference (FORWARD ROLLING FORMAT CHANGE)
[linux-block.git] / fs / btrfs / ctree.c
6cbd5570 1/*
d352ac68 2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6cbd5570
CM
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
a6b6e75e 19#include <linux/sched.h>
eb60ceac
CM
20#include "ctree.h"
21#include "disk-io.h"
7f5c1516 22#include "transaction.h"
5f39d397 23#include "print-tree.h"
925baedd 24#include "locking.h"
9a8dd150 25
e089f05c
CM
26static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
27 *root, struct btrfs_path *path, int level);
28static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
d4dbff95 29 *root, struct btrfs_key *ins_key,
cc0c5538 30 struct btrfs_path *path, int data_size, int extend);
5f39d397
CM
31static int push_node_left(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 33 struct extent_buffer *src, int empty);
5f39d397
CM
34static int balance_node_right(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root,
36 struct extent_buffer *dst_buf,
37 struct extent_buffer *src_buf);
e089f05c
CM
38static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
39 struct btrfs_path *path, int level, int slot);
d97e63b6 40
df24a2b9 41struct btrfs_path *btrfs_alloc_path(void)
2c90e5d6 42{
df24a2b9 43 struct btrfs_path *path;
e00f7308
JM
44 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
45 if (path)
2cc58cf2 46 path->reada = 1;
df24a2b9 47 return path;
2c90e5d6
CM
48}
49
b4ce94de
CM
50/*
51 * set all locked nodes in the path to blocking locks. This should
52 * be done before scheduling
53 */
54noinline void btrfs_set_path_blocking(struct btrfs_path *p)
55{
56 int i;
57 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
58 if (p->nodes[i] && p->locks[i])
59 btrfs_set_lock_blocking(p->nodes[i]);
60 }
61}
62
63/*
64 * reset all the locked nodes in the path to spinning locks.
4008c04a
CM
65 *
66 * held is used to keep lockdep happy: when lockdep is enabled
67 * we set held to a blocking lock before we go around and
68 * retake all the spinlocks in the path. You can safely use NULL
69 * for held
b4ce94de 70 */
4008c04a
CM
71noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
72 struct extent_buffer *held)
b4ce94de
CM
73{
74 int i;
4008c04a
CM
75
76#ifdef CONFIG_DEBUG_LOCK_ALLOC
77 /* lockdep really cares that we take all of these spinlocks
78 * in the right order. If any of the locks in the path are not
79 * currently blocking, it is going to complain. So, make really
80 * really sure by forcing the path to blocking before we clear
81 * the path blocking.
82 */
83 if (held)
84 btrfs_set_lock_blocking(held);
85 btrfs_set_path_blocking(p);
86#endif
87
88 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
b4ce94de
CM
89 if (p->nodes[i] && p->locks[i])
90 btrfs_clear_lock_blocking(p->nodes[i]);
91 }
4008c04a
CM
92
93#ifdef CONFIG_DEBUG_LOCK_ALLOC
94 if (held)
95 btrfs_clear_lock_blocking(held);
96#endif
b4ce94de
CM
97}
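/*
 * Editorial sketch (not part of the original file): the usual pairing of
 * the two helpers above.  A caller holding spinning tree locks switches
 * the whole path to blocking locks before doing anything that might
 * schedule, and switches back afterwards.
 */
#if 0
	btrfs_set_path_blocking(path);		/* spinning -> blocking */
	/* ... work that may sleep: read_tree_block(), GFP_NOFS allocations ... */
	btrfs_clear_path_blocking(path, NULL);	/* back to spinning locks */
#endif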
98
d352ac68 99/* this also releases the path */
df24a2b9 100void btrfs_free_path(struct btrfs_path *p)
be0e5c09 101{
df24a2b9
CM
102 btrfs_release_path(NULL, p);
103 kmem_cache_free(btrfs_path_cachep, p);
be0e5c09
CM
104}
105
d352ac68
CM
106/*
107 * path release drops references on the extent buffers in the path
108 * and it drops any locks held by this path
109 *
110 * It is safe to call this on paths that have no locks or extent buffers held.
111 */
d397712b 112noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
eb60ceac
CM
113{
114 int i;
a2135011 115
234b63a0 116 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3f157a2f 117 p->slots[i] = 0;
eb60ceac 118 if (!p->nodes[i])
925baedd
CM
119 continue;
120 if (p->locks[i]) {
121 btrfs_tree_unlock(p->nodes[i]);
122 p->locks[i] = 0;
123 }
5f39d397 124 free_extent_buffer(p->nodes[i]);
3f157a2f 125 p->nodes[i] = NULL;
eb60ceac
CM
126 }
127}
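/*
 * Editorial sketch (not part of the original file): the typical lifecycle
 * of a path as seen from a caller.  The key values are made up, error
 * handling is trimmed, and ins_len = 0 / cow = 0 request a read-only search.
 */
#if 0
static int example_lookup(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* on ret == 0 the item is at path->nodes[0], path->slots[0] */

	btrfs_release_path(root, path);	/* drop locks and extent buffer refs */
	btrfs_free_path(path);		/* frees the path (also releases it) */
	return ret;
}
#endif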
128
d352ac68
CM
129/*
130 * safely gets a reference on the root node of a tree. A lock
131 * is not taken, so a concurrent writer may put a different node
132 * at the root of the tree. See btrfs_lock_root_node for the
133 * looping required.
134 *
135 * The extent buffer returned by this has a reference taken, so
136 * it won't disappear. It may stop being the root of the tree
137 * at any time because there are no locks held.
138 */
925baedd
CM
139struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
140{
141 struct extent_buffer *eb;
142 spin_lock(&root->node_lock);
143 eb = root->node;
144 extent_buffer_get(eb);
145 spin_unlock(&root->node_lock);
146 return eb;
147}
148
d352ac68
CM
149/* loop around taking references on and locking the root node of the
150 * tree until you end up with a lock on the root. A locked buffer
151 * is returned, with a reference held.
152 */
925baedd
CM
153struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
154{
155 struct extent_buffer *eb;
156
d397712b 157 while (1) {
925baedd
CM
158 eb = btrfs_root_node(root);
159 btrfs_tree_lock(eb);
160
161 spin_lock(&root->node_lock);
162 if (eb == root->node) {
163 spin_unlock(&root->node_lock);
164 break;
165 }
166 spin_unlock(&root->node_lock);
167
168 btrfs_tree_unlock(eb);
169 free_extent_buffer(eb);
170 }
171 return eb;
172}
173
d352ac68
CM
174/* cowonly roots (everything not a reference counted cow subvolume) just get
175 * put onto a simple dirty list. transaction.c walks this to make sure they
176 * get properly updated on disk.
177 */
0b86a832
CM
178static void add_root_to_dirty_list(struct btrfs_root *root)
179{
180 if (root->track_dirty && list_empty(&root->dirty_list)) {
181 list_add(&root->dirty_list,
182 &root->fs_info->dirty_cowonly_roots);
183 }
184}
185
d352ac68
CM
186/*
187 * used by snapshot creation to make a copy of a root for a tree with
188 * a given objectid. The buffer with the new root node is returned in
189 * cow_ret, and this func returns zero on success or a negative error code.
190 */
be20aa9d
CM
191int btrfs_copy_root(struct btrfs_trans_handle *trans,
192 struct btrfs_root *root,
193 struct extent_buffer *buf,
194 struct extent_buffer **cow_ret, u64 new_root_objectid)
195{
196 struct extent_buffer *cow;
197 u32 nritems;
198 int ret = 0;
199 int level;
5d4f98a2 200 struct btrfs_disk_key disk_key;
be20aa9d
CM
201
202 WARN_ON(root->ref_cows && trans->transid !=
203 root->fs_info->running_transaction->transid);
204 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
205
206 level = btrfs_header_level(buf);
207 nritems = btrfs_header_nritems(buf);
5d4f98a2
YZ
208 if (level == 0)
209 btrfs_item_key(buf, &disk_key, 0);
210 else
211 btrfs_node_key(buf, &disk_key, 0);
31840ae1 212
5d4f98a2
YZ
213 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
214 new_root_objectid, &disk_key, level,
215 buf->start, 0);
216 if (IS_ERR(cow))
be20aa9d
CM
217 return PTR_ERR(cow);
218
219 copy_extent_buffer(cow, buf, 0, 0, cow->len);
220 btrfs_set_header_bytenr(cow, cow->start);
221 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
222 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
223 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
224 BTRFS_HEADER_FLAG_RELOC);
225 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
226 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
227 else
228 btrfs_set_header_owner(cow, new_root_objectid);
be20aa9d 229
2b82032c
YZ
230 write_extent_buffer(cow, root->fs_info->fsid,
231 (unsigned long)btrfs_header_fsid(cow),
232 BTRFS_FSID_SIZE);
233
be20aa9d 234 WARN_ON(btrfs_header_generation(buf) > trans->transid);
5d4f98a2
YZ
235 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
236 ret = btrfs_inc_ref(trans, root, cow, 1);
237 else
238 ret = btrfs_inc_ref(trans, root, cow, 0);
4aec2b52 239
be20aa9d
CM
240 if (ret)
241 return ret;
242
243 btrfs_mark_buffer_dirty(cow);
244 *cow_ret = cow;
245 return 0;
246}
247
5d4f98a2
YZ
248/*
249 * check if the tree block can be shared by multiple trees
250 */
251int btrfs_block_can_be_shared(struct btrfs_root *root,
252 struct extent_buffer *buf)
253{
254 /*
255 * Tree blocks not in reference counted trees and tree roots
256 * are never shared. If a block was allocated after the last
257 * snapshot and the block was not allocated by tree relocation,
258 * we know the block is not shared.
259 */
260 if (root->ref_cows &&
261 buf != root->node && buf != root->commit_root &&
262 (btrfs_header_generation(buf) <=
263 btrfs_root_last_snapshot(&root->root_item) ||
264 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
265 return 1;
266#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
267 if (root->ref_cows &&
268 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
269 return 1;
270#endif
271 return 0;
272}
273
274static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
275 struct btrfs_root *root,
276 struct extent_buffer *buf,
277 struct extent_buffer *cow)
278{
279 u64 refs;
280 u64 owner;
281 u64 flags;
282 u64 new_flags = 0;
283 int ret;
284
285 /*
286 * Backrefs update rules:
287 *
288 * Always use full backrefs for extent pointers in tree block
289 * allocated by tree relocation.
290 *
291 * If a shared tree block is no longer referenced by its owner
292 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
293 * use full backrefs for extent pointers in tree block.
294 *
295 * If a tree block is being relocated
296 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
297 * use full backrefs for extent pointers in tree block.
298 * The reason for this is that some operations (such as dropping a tree)
299 * are only allowed on blocks that use full backrefs.
300 */
301
302 if (btrfs_block_can_be_shared(root, buf)) {
303 ret = btrfs_lookup_extent_info(trans, root, buf->start,
304 buf->len, &refs, &flags);
305 BUG_ON(ret);
306 BUG_ON(refs == 0);
307 } else {
308 refs = 1;
309 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
310 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
311 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
312 else
313 flags = 0;
314 }
315
316 owner = btrfs_header_owner(buf);
317 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
318 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
319
320 if (refs > 1) {
321 if ((owner == root->root_key.objectid ||
322 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
323 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
324 ret = btrfs_inc_ref(trans, root, buf, 1);
325 BUG_ON(ret);
326
327 if (root->root_key.objectid ==
328 BTRFS_TREE_RELOC_OBJECTID) {
329 ret = btrfs_dec_ref(trans, root, buf, 0);
330 BUG_ON(ret);
331 ret = btrfs_inc_ref(trans, root, cow, 1);
332 BUG_ON(ret);
333 }
334 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
335 } else {
336
337 if (root->root_key.objectid ==
338 BTRFS_TREE_RELOC_OBJECTID)
339 ret = btrfs_inc_ref(trans, root, cow, 1);
340 else
341 ret = btrfs_inc_ref(trans, root, cow, 0);
342 BUG_ON(ret);
343 }
344 if (new_flags != 0) {
345 ret = btrfs_set_disk_extent_flags(trans, root,
346 buf->start,
347 buf->len,
348 new_flags, 0);
349 BUG_ON(ret);
350 }
351 } else {
352 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
353 if (root->root_key.objectid ==
354 BTRFS_TREE_RELOC_OBJECTID)
355 ret = btrfs_inc_ref(trans, root, cow, 1);
356 else
357 ret = btrfs_inc_ref(trans, root, cow, 0);
358 BUG_ON(ret);
359 ret = btrfs_dec_ref(trans, root, buf, 1);
360 BUG_ON(ret);
361 }
362 clean_tree_block(trans, root, buf);
363 }
364 return 0;
365}
366
d352ac68 367/*
d397712b
CM
368 * does the dirty work in cow of a single block. The parent block (if
369 * supplied) is updated to point to the new cow copy. The new buffer is marked
370 * dirty and returned locked. If you modify the block it needs to be marked
371 * dirty again.
d352ac68
CM
372 *
373 * search_start -- an allocation hint for the new block
374 *
d397712b
CM
375 * empty_size -- a hint that you plan on doing more cow. This is the size in
376 * bytes the allocator should try to find free next to the block it returns.
377 * This is just a hint and may be ignored by the allocator.
d352ac68 378 */
d397712b 379static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
380 struct btrfs_root *root,
381 struct extent_buffer *buf,
382 struct extent_buffer *parent, int parent_slot,
383 struct extent_buffer **cow_ret,
9fa8cfe7 384 u64 search_start, u64 empty_size)
02217ed2 385{
5d4f98a2 386 struct btrfs_disk_key disk_key;
5f39d397 387 struct extent_buffer *cow;
7bb86316 388 int level;
925baedd 389 int unlock_orig = 0;
5d4f98a2 390 u64 parent_start;
7bb86316 391
925baedd
CM
392 if (*cow_ret == buf)
393 unlock_orig = 1;
394
b9447ef8 395 btrfs_assert_tree_locked(buf);
925baedd 396
7bb86316
CM
397 WARN_ON(root->ref_cows && trans->transid !=
398 root->fs_info->running_transaction->transid);
6702ed49 399 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
5f39d397 400
7bb86316 401 level = btrfs_header_level(buf);
31840ae1 402
5d4f98a2
YZ
403 if (level == 0)
404 btrfs_item_key(buf, &disk_key, 0);
405 else
406 btrfs_node_key(buf, &disk_key, 0);
407
408 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
409 if (parent)
410 parent_start = parent->start;
411 else
412 parent_start = 0;
413 } else
414 parent_start = 0;
415
416 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
417 root->root_key.objectid, &disk_key,
418 level, search_start, empty_size);
54aa1f4d
CM
419 if (IS_ERR(cow))
420 return PTR_ERR(cow);
6702ed49 421
b4ce94de
CM
422 /* cow is set to blocking by btrfs_init_new_buffer */
423
5f39d397 424 copy_extent_buffer(cow, buf, 0, 0, cow->len);
db94535d 425 btrfs_set_header_bytenr(cow, cow->start);
5f39d397 426 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
427 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
428 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
429 BTRFS_HEADER_FLAG_RELOC);
430 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
431 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
432 else
433 btrfs_set_header_owner(cow, root->root_key.objectid);
6702ed49 434
2b82032c
YZ
435 write_extent_buffer(cow, root->fs_info->fsid,
436 (unsigned long)btrfs_header_fsid(cow),
437 BTRFS_FSID_SIZE);
438
5d4f98a2 439 update_ref_for_cow(trans, root, buf, cow);
1a40e23b 440
02217ed2 441 if (buf == root->node) {
925baedd 442 WARN_ON(parent && parent != buf);
5d4f98a2
YZ
443 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
444 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
445 parent_start = buf->start;
446 else
447 parent_start = 0;
925baedd
CM
448
449 spin_lock(&root->node_lock);
02217ed2 450 root->node = cow;
5f39d397 451 extent_buffer_get(cow);
925baedd
CM
452 spin_unlock(&root->node_lock);
453
5d4f98a2
YZ
454 btrfs_free_extent(trans, root, buf->start, buf->len,
455 parent_start, root->root_key.objectid,
456 level, 0);
5f39d397 457 free_extent_buffer(buf);
0b86a832 458 add_root_to_dirty_list(root);
02217ed2 459 } else {
5d4f98a2
YZ
460 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
461 parent_start = parent->start;
462 else
463 parent_start = 0;
464
465 WARN_ON(trans->transid != btrfs_header_generation(parent));
5f39d397 466 btrfs_set_node_blockptr(parent, parent_slot,
db94535d 467 cow->start);
74493f7a
CM
468 btrfs_set_node_ptr_generation(parent, parent_slot,
469 trans->transid);
d6025579 470 btrfs_mark_buffer_dirty(parent);
7bb86316 471 btrfs_free_extent(trans, root, buf->start, buf->len,
5d4f98a2
YZ
472 parent_start, root->root_key.objectid,
473 level, 0);
02217ed2 474 }
925baedd
CM
475 if (unlock_orig)
476 btrfs_tree_unlock(buf);
5f39d397 477 free_extent_buffer(buf);
ccd467d6 478 btrfs_mark_buffer_dirty(cow);
2c90e5d6 479 *cow_ret = cow;
02217ed2
CM
480 return 0;
481}
482
5d4f98a2
YZ
483static inline int should_cow_block(struct btrfs_trans_handle *trans,
484 struct btrfs_root *root,
485 struct extent_buffer *buf)
486{
487 if (btrfs_header_generation(buf) == trans->transid &&
488 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
489 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
490 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
491 return 0;
492 return 1;
493}
494
d352ac68
CM
495/*
496 * cows a single block, see __btrfs_cow_block for the real work.
497 * This version of it has extra checks so that a block isn't cow'd more than
498 * once per transaction, as long as it hasn't been written yet
499 */
d397712b 500noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
501 struct btrfs_root *root, struct extent_buffer *buf,
502 struct extent_buffer *parent, int parent_slot,
9fa8cfe7 503 struct extent_buffer **cow_ret)
6702ed49
CM
504{
505 u64 search_start;
f510cfec 506 int ret;
dc17ff8f 507
6702ed49 508 if (trans->transaction != root->fs_info->running_transaction) {
d397712b
CM
509 printk(KERN_CRIT "trans %llu running %llu\n",
510 (unsigned long long)trans->transid,
511 (unsigned long long)
6702ed49
CM
512 root->fs_info->running_transaction->transid);
513 WARN_ON(1);
514 }
515 if (trans->transid != root->fs_info->generation) {
d397712b
CM
516 printk(KERN_CRIT "trans %llu running %llu\n",
517 (unsigned long long)trans->transid,
518 (unsigned long long)root->fs_info->generation);
6702ed49
CM
519 WARN_ON(1);
520 }
dc17ff8f 521
5d4f98a2 522 if (!should_cow_block(trans, root, buf)) {
6702ed49
CM
523 *cow_ret = buf;
524 return 0;
525 }
c487685d 526
0b86a832 527 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
b4ce94de
CM
528
529 if (parent)
530 btrfs_set_lock_blocking(parent);
531 btrfs_set_lock_blocking(buf);
532
f510cfec 533 ret = __btrfs_cow_block(trans, root, buf, parent,
9fa8cfe7 534 parent_slot, cow_ret, search_start, 0);
f510cfec 535 return ret;
6702ed49
CM
536}
537
d352ac68
CM
538/*
539 * helper function for defrag to decide if two blocks pointed to by a
540 * node are actually close by
541 */
6b80053d 542static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
6702ed49 543{
6b80053d 544 if (blocknr < other && other - (blocknr + blocksize) < 32768)
6702ed49 545 return 1;
6b80053d 546 if (blocknr > other && blocknr - (other + blocksize) < 32768)
6702ed49
CM
547 return 1;
548 return 0;
549}
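/*
 * Worked example (editorial note, not in the original file): with a 4K
 * blocksize, blocknr = 100 * 1024 and other = 120 * 1024 gives
 * other - (blocknr + blocksize) = 16K, which is under the 32K threshold,
 * so the two blocks count as close and defrag leaves them where they are.
 */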
550
081e9573
CM
551/*
552 * compare two keys in a memcmp fashion
553 */
554static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
555{
556 struct btrfs_key k1;
557
558 btrfs_disk_key_to_cpu(&k1, disk);
559
560 if (k1.objectid > k2->objectid)
561 return 1;
562 if (k1.objectid < k2->objectid)
563 return -1;
564 if (k1.type > k2->type)
565 return 1;
566 if (k1.type < k2->type)
567 return -1;
568 if (k1.offset > k2->offset)
569 return 1;
570 if (k1.offset < k2->offset)
571 return -1;
572 return 0;
573}
574
f3465ca4
JB
575/*
576 * same as comp_keys only with two btrfs_key's
577 */
5d4f98a2 578int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
f3465ca4
JB
579{
580 if (k1->objectid > k2->objectid)
581 return 1;
582 if (k1->objectid < k2->objectid)
583 return -1;
584 if (k1->type > k2->type)
585 return 1;
586 if (k1->type < k2->type)
587 return -1;
588 if (k1->offset > k2->offset)
589 return 1;
590 if (k1->offset < k2->offset)
591 return -1;
592 return 0;
593}
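/*
 * Example (editorial note, not in the original file): both comparators
 * order keys by objectid, then type, then offset, so
 *	(256, 1, 0) < (256, 84, 0) < (257, 1, 0)
 * and two keys are equal only when all three fields match.
 */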
081e9573 594
d352ac68
CM
595/*
596 * this is used by the defrag code to go through all the
597 * leaves pointed to by a node and reallocate them so that
598 * disk order is close to key order
599 */
6702ed49 600int btrfs_realloc_node(struct btrfs_trans_handle *trans,
5f39d397 601 struct btrfs_root *root, struct extent_buffer *parent,
a6b6e75e
CM
602 int start_slot, int cache_only, u64 *last_ret,
603 struct btrfs_key *progress)
6702ed49 604{
6b80053d 605 struct extent_buffer *cur;
6702ed49 606 u64 blocknr;
ca7a79ad 607 u64 gen;
e9d0b13b
CM
608 u64 search_start = *last_ret;
609 u64 last_block = 0;
6702ed49
CM
610 u64 other;
611 u32 parent_nritems;
6702ed49
CM
612 int end_slot;
613 int i;
614 int err = 0;
f2183bde 615 int parent_level;
6b80053d
CM
616 int uptodate;
617 u32 blocksize;
081e9573
CM
618 int progress_passed = 0;
619 struct btrfs_disk_key disk_key;
6702ed49 620
5708b959
CM
621 parent_level = btrfs_header_level(parent);
622 if (cache_only && parent_level != 1)
623 return 0;
624
d397712b 625 if (trans->transaction != root->fs_info->running_transaction)
6702ed49 626 WARN_ON(1);
d397712b 627 if (trans->transid != root->fs_info->generation)
6702ed49 628 WARN_ON(1);
86479a04 629
6b80053d 630 parent_nritems = btrfs_header_nritems(parent);
6b80053d 631 blocksize = btrfs_level_size(root, parent_level - 1);
6702ed49
CM
632 end_slot = parent_nritems;
633
634 if (parent_nritems == 1)
635 return 0;
636
b4ce94de
CM
637 btrfs_set_lock_blocking(parent);
638
6702ed49
CM
639 for (i = start_slot; i < end_slot; i++) {
640 int close = 1;
a6b6e75e 641
5708b959
CM
642 if (!parent->map_token) {
643 map_extent_buffer(parent,
644 btrfs_node_key_ptr_offset(i),
645 sizeof(struct btrfs_key_ptr),
646 &parent->map_token, &parent->kaddr,
647 &parent->map_start, &parent->map_len,
648 KM_USER1);
649 }
081e9573
CM
650 btrfs_node_key(parent, &disk_key, i);
651 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
652 continue;
653
654 progress_passed = 1;
6b80053d 655 blocknr = btrfs_node_blockptr(parent, i);
ca7a79ad 656 gen = btrfs_node_ptr_generation(parent, i);
e9d0b13b
CM
657 if (last_block == 0)
658 last_block = blocknr;
5708b959 659
6702ed49 660 if (i > 0) {
6b80053d
CM
661 other = btrfs_node_blockptr(parent, i - 1);
662 close = close_blocks(blocknr, other, blocksize);
6702ed49 663 }
0ef3e66b 664 if (!close && i < end_slot - 2) {
6b80053d
CM
665 other = btrfs_node_blockptr(parent, i + 1);
666 close = close_blocks(blocknr, other, blocksize);
6702ed49 667 }
e9d0b13b
CM
668 if (close) {
669 last_block = blocknr;
6702ed49 670 continue;
e9d0b13b 671 }
5708b959
CM
672 if (parent->map_token) {
673 unmap_extent_buffer(parent, parent->map_token,
674 KM_USER1);
675 parent->map_token = NULL;
676 }
6702ed49 677
6b80053d
CM
678 cur = btrfs_find_tree_block(root, blocknr, blocksize);
679 if (cur)
1259ab75 680 uptodate = btrfs_buffer_uptodate(cur, gen);
6b80053d
CM
681 else
682 uptodate = 0;
5708b959 683 if (!cur || !uptodate) {
6702ed49 684 if (cache_only) {
6b80053d 685 free_extent_buffer(cur);
6702ed49
CM
686 continue;
687 }
6b80053d
CM
688 if (!cur) {
689 cur = read_tree_block(root, blocknr,
ca7a79ad 690 blocksize, gen);
6b80053d 691 } else if (!uptodate) {
ca7a79ad 692 btrfs_read_buffer(cur, gen);
f2183bde 693 }
6702ed49 694 }
e9d0b13b 695 if (search_start == 0)
6b80053d 696 search_start = last_block;
e9d0b13b 697
e7a84565 698 btrfs_tree_lock(cur);
b4ce94de 699 btrfs_set_lock_blocking(cur);
6b80053d 700 err = __btrfs_cow_block(trans, root, cur, parent, i,
e7a84565 701 &cur, search_start,
6b80053d 702 min(16 * blocksize,
9fa8cfe7 703 (end_slot - i) * blocksize));
252c38f0 704 if (err) {
e7a84565 705 btrfs_tree_unlock(cur);
6b80053d 706 free_extent_buffer(cur);
6702ed49 707 break;
252c38f0 708 }
e7a84565
CM
709 search_start = cur->start;
710 last_block = cur->start;
f2183bde 711 *last_ret = search_start;
e7a84565
CM
712 btrfs_tree_unlock(cur);
713 free_extent_buffer(cur);
6702ed49 714 }
5708b959
CM
715 if (parent->map_token) {
716 unmap_extent_buffer(parent, parent->map_token,
717 KM_USER1);
718 parent->map_token = NULL;
719 }
6702ed49
CM
720 return err;
721}
722
74123bd7
CM
723/*
724 * The leaf data grows from end-to-front in the node.
725 * this returns the address of the start of the last item,
726 * which is the stop of the leaf data stack
727 */
123abc88 728static inline unsigned int leaf_data_end(struct btrfs_root *root,
5f39d397 729 struct extent_buffer *leaf)
be0e5c09 730{
5f39d397 731 u32 nr = btrfs_header_nritems(leaf);
be0e5c09 732 if (nr == 0)
123abc88 733 return BTRFS_LEAF_DATA_SIZE(root);
5f39d397 734 return btrfs_item_offset_nr(leaf, nr - 1);
be0e5c09
CM
735}
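/*
 * Rough picture (editorial sketch, not in the original file) of a leaf:
 *
 *	[header][item 0][item 1]...[item N-1] ...free space... [data N-1]...[data 1][data 0]
 *
 * item headers grow from the front while item data grows from the end
 * toward the front, so leaf_data_end() is the offset where the data
 * stack currently stops (BTRFS_LEAF_DATA_SIZE(root) for an empty leaf).
 */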
736
d352ac68
CM
737/*
738 * extra debugging checks to make sure all the items in a node are
739 * well formed and in the proper order
740 */
123abc88
CM
741static int check_node(struct btrfs_root *root, struct btrfs_path *path,
742 int level)
aa5d6bed 743{
5f39d397
CM
744 struct extent_buffer *parent = NULL;
745 struct extent_buffer *node = path->nodes[level];
746 struct btrfs_disk_key parent_key;
747 struct btrfs_disk_key node_key;
aa5d6bed 748 int parent_slot;
8d7be552
CM
749 int slot;
750 struct btrfs_key cpukey;
5f39d397 751 u32 nritems = btrfs_header_nritems(node);
aa5d6bed
CM
752
753 if (path->nodes[level + 1])
5f39d397 754 parent = path->nodes[level + 1];
a1f39630 755
8d7be552 756 slot = path->slots[level];
7518a238
CM
757 BUG_ON(nritems == 0);
758 if (parent) {
a1f39630 759 parent_slot = path->slots[level + 1];
5f39d397
CM
760 btrfs_node_key(parent, &parent_key, parent_slot);
761 btrfs_node_key(node, &node_key, 0);
762 BUG_ON(memcmp(&parent_key, &node_key,
e2fa7227 763 sizeof(struct btrfs_disk_key)));
1d4f8a0c 764 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
db94535d 765 btrfs_header_bytenr(node));
aa5d6bed 766 }
123abc88 767 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
8d7be552 768 if (slot != 0) {
5f39d397
CM
769 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
770 btrfs_node_key(node, &node_key, slot);
771 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
8d7be552
CM
772 }
773 if (slot < nritems - 1) {
5f39d397
CM
774 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
775 btrfs_node_key(node, &node_key, slot);
776 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
aa5d6bed
CM
777 }
778 return 0;
779}
780
d352ac68
CM
781/*
782 * extra checking to make sure all the items in a leaf are
783 * well formed and in the proper order
784 */
123abc88
CM
785static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
786 int level)
aa5d6bed 787{
5f39d397
CM
788 struct extent_buffer *leaf = path->nodes[level];
789 struct extent_buffer *parent = NULL;
aa5d6bed 790 int parent_slot;
8d7be552 791 struct btrfs_key cpukey;
5f39d397
CM
792 struct btrfs_disk_key parent_key;
793 struct btrfs_disk_key leaf_key;
794 int slot = path->slots[0];
8d7be552 795
5f39d397 796 u32 nritems = btrfs_header_nritems(leaf);
aa5d6bed
CM
797
798 if (path->nodes[level + 1])
5f39d397 799 parent = path->nodes[level + 1];
7518a238
CM
800
801 if (nritems == 0)
802 return 0;
803
804 if (parent) {
a1f39630 805 parent_slot = path->slots[level + 1];
5f39d397
CM
806 btrfs_node_key(parent, &parent_key, parent_slot);
807 btrfs_item_key(leaf, &leaf_key, 0);
6702ed49 808
5f39d397 809 BUG_ON(memcmp(&parent_key, &leaf_key,
e2fa7227 810 sizeof(struct btrfs_disk_key)));
1d4f8a0c 811 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
db94535d 812 btrfs_header_bytenr(leaf));
5f39d397 813 }
5f39d397
CM
814 if (slot != 0 && slot < nritems - 1) {
815 btrfs_item_key(leaf, &leaf_key, slot);
816 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
817 if (comp_keys(&leaf_key, &cpukey) <= 0) {
818 btrfs_print_leaf(root, leaf);
d397712b 819 printk(KERN_CRIT "slot %d offset bad key\n", slot);
5f39d397
CM
820 BUG_ON(1);
821 }
822 if (btrfs_item_offset_nr(leaf, slot - 1) !=
823 btrfs_item_end_nr(leaf, slot)) {
824 btrfs_print_leaf(root, leaf);
d397712b 825 printk(KERN_CRIT "slot %d offset bad\n", slot);
5f39d397
CM
826 BUG_ON(1);
827 }
8d7be552
CM
828 }
829 if (slot < nritems - 1) {
5f39d397
CM
830 btrfs_item_key(leaf, &leaf_key, slot);
831 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
832 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
833 if (btrfs_item_offset_nr(leaf, slot) !=
834 btrfs_item_end_nr(leaf, slot + 1)) {
835 btrfs_print_leaf(root, leaf);
d397712b 836 printk(KERN_CRIT "slot %d offset bad\n", slot);
5f39d397
CM
837 BUG_ON(1);
838 }
aa5d6bed 839 }
5f39d397
CM
840 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
841 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
aa5d6bed
CM
842 return 0;
843}
844
d397712b 845static noinline int check_block(struct btrfs_root *root,
98ed5174 846 struct btrfs_path *path, int level)
aa5d6bed 847{
85d824c4 848 return 0;
aa5d6bed 849 if (level == 0)
123abc88
CM
850 return check_leaf(root, path, level);
851 return check_node(root, path, level);
aa5d6bed
CM
852}
853
74123bd7 854/*
5f39d397
CM
855 * search for key in the extent_buffer. The items start at offset p,
856 * and they are item_size apart. There are 'max' items in p.
857 *
74123bd7
CM
858 * the slot in the array is returned via slot, and it points to
859 * the place where you would insert key if it is not found in
860 * the array.
861 *
862 * slot may point to max if the key is bigger than all of the keys
863 */
e02119d5
CM
864static noinline int generic_bin_search(struct extent_buffer *eb,
865 unsigned long p,
866 int item_size, struct btrfs_key *key,
867 int max, int *slot)
be0e5c09
CM
868{
869 int low = 0;
870 int high = max;
871 int mid;
872 int ret;
479965d6 873 struct btrfs_disk_key *tmp = NULL;
5f39d397
CM
874 struct btrfs_disk_key unaligned;
875 unsigned long offset;
876 char *map_token = NULL;
877 char *kaddr = NULL;
878 unsigned long map_start = 0;
879 unsigned long map_len = 0;
479965d6 880 int err;
be0e5c09 881
d397712b 882 while (low < high) {
be0e5c09 883 mid = (low + high) / 2;
5f39d397
CM
884 offset = p + mid * item_size;
885
886 if (!map_token || offset < map_start ||
887 (offset + sizeof(struct btrfs_disk_key)) >
888 map_start + map_len) {
479965d6 889 if (map_token) {
5f39d397 890 unmap_extent_buffer(eb, map_token, KM_USER0);
479965d6
CM
891 map_token = NULL;
892 }
934d375b
CM
893
894 err = map_private_extent_buffer(eb, offset,
479965d6
CM
895 sizeof(struct btrfs_disk_key),
896 &map_token, &kaddr,
897 &map_start, &map_len, KM_USER0);
898
899 if (!err) {
900 tmp = (struct btrfs_disk_key *)(kaddr + offset -
901 map_start);
902 } else {
903 read_extent_buffer(eb, &unaligned,
904 offset, sizeof(unaligned));
905 tmp = &unaligned;
906 }
5f39d397 907
5f39d397
CM
908 } else {
909 tmp = (struct btrfs_disk_key *)(kaddr + offset -
910 map_start);
911 }
be0e5c09
CM
912 ret = comp_keys(tmp, key);
913
914 if (ret < 0)
915 low = mid + 1;
916 else if (ret > 0)
917 high = mid;
918 else {
919 *slot = mid;
479965d6
CM
920 if (map_token)
921 unmap_extent_buffer(eb, map_token, KM_USER0);
be0e5c09
CM
922 return 0;
923 }
924 }
925 *slot = low;
5f39d397
CM
926 if (map_token)
927 unmap_extent_buffer(eb, map_token, KM_USER0);
be0e5c09
CM
928 return 1;
929}
930
97571fd0
CM
931/*
932 * simple bin_search frontend that does the right thing for
933 * leaves vs nodes
934 */
5f39d397
CM
935static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
936 int level, int *slot)
be0e5c09 937{
5f39d397
CM
938 if (level == 0) {
939 return generic_bin_search(eb,
940 offsetof(struct btrfs_leaf, items),
0783fcfc 941 sizeof(struct btrfs_item),
5f39d397 942 key, btrfs_header_nritems(eb),
7518a238 943 slot);
be0e5c09 944 } else {
5f39d397
CM
945 return generic_bin_search(eb,
946 offsetof(struct btrfs_node, ptrs),
123abc88 947 sizeof(struct btrfs_key_ptr),
5f39d397 948 key, btrfs_header_nritems(eb),
7518a238 949 slot);
be0e5c09
CM
950 }
951 return -1;
952}
953
5d4f98a2
YZ
954int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
955 int level, int *slot)
956{
957 return bin_search(eb, key, level, slot);
958}
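/*
 * Illustrative use (editorial sketch, not in the original file): find the
 * slot for a key inside a node or leaf the caller already holds a
 * reference on.  The key values here are made up.
 */
#if 0
	struct btrfs_key key = { .objectid = 256, .type = 1, .offset = 0 };
	int slot;
	int ret;

	ret = btrfs_bin_search(eb, &key, btrfs_header_level(eb), &slot);
	/* ret == 0: exact match at 'slot'; ret == 1: key would be inserted at 'slot' */
#endif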
959
d352ac68
CM
960/* given a node and slot number, this reads the blocks it points to. The
961 * extent buffer is returned with a reference taken (but unlocked).
962 * NULL is returned on error.
963 */
e02119d5 964static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
5f39d397 965 struct extent_buffer *parent, int slot)
bb803951 966{
ca7a79ad 967 int level = btrfs_header_level(parent);
bb803951
CM
968 if (slot < 0)
969 return NULL;
5f39d397 970 if (slot >= btrfs_header_nritems(parent))
bb803951 971 return NULL;
ca7a79ad
CM
972
973 BUG_ON(level == 0);
974
db94535d 975 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
ca7a79ad
CM
976 btrfs_level_size(root, level - 1),
977 btrfs_node_ptr_generation(parent, slot));
bb803951
CM
978}
979
d352ac68
CM
980/*
981 * node level balancing, used to make sure nodes are in proper order for
982 * item deletion. We balance from the top down, so we have to make sure
983 * that a deletion won't leave a node completely empty later on.
984 */
e02119d5 985static noinline int balance_level(struct btrfs_trans_handle *trans,
98ed5174
CM
986 struct btrfs_root *root,
987 struct btrfs_path *path, int level)
bb803951 988{
5f39d397
CM
989 struct extent_buffer *right = NULL;
990 struct extent_buffer *mid;
991 struct extent_buffer *left = NULL;
992 struct extent_buffer *parent = NULL;
bb803951
CM
993 int ret = 0;
994 int wret;
995 int pslot;
bb803951 996 int orig_slot = path->slots[level];
54aa1f4d 997 int err_on_enospc = 0;
79f95c82 998 u64 orig_ptr;
bb803951
CM
999
1000 if (level == 0)
1001 return 0;
1002
5f39d397 1003 mid = path->nodes[level];
b4ce94de 1004
925baedd 1005 WARN_ON(!path->locks[level]);
7bb86316
CM
1006 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1007
1d4f8a0c 1008 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
79f95c82 1009
234b63a0 1010 if (level < BTRFS_MAX_LEVEL - 1)
5f39d397 1011 parent = path->nodes[level + 1];
bb803951
CM
1012 pslot = path->slots[level + 1];
1013
40689478
CM
1014 /*
1015 * deal with the case where there is only one pointer in the root
1016 * by promoting the node below to a root
1017 */
5f39d397
CM
1018 if (!parent) {
1019 struct extent_buffer *child;
bb803951 1020
5f39d397 1021 if (btrfs_header_nritems(mid) != 1)
bb803951
CM
1022 return 0;
1023
1024 /* promote the child to a root */
5f39d397 1025 child = read_node_slot(root, mid, 0);
7951f3ce 1026 BUG_ON(!child);
925baedd 1027 btrfs_tree_lock(child);
b4ce94de 1028 btrfs_set_lock_blocking(child);
9fa8cfe7 1029 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
2f375ab9
Y
1030 BUG_ON(ret);
1031
925baedd 1032 spin_lock(&root->node_lock);
bb803951 1033 root->node = child;
925baedd
CM
1034 spin_unlock(&root->node_lock);
1035
0b86a832 1036 add_root_to_dirty_list(root);
925baedd 1037 btrfs_tree_unlock(child);
b4ce94de 1038
925baedd 1039 path->locks[level] = 0;
bb803951 1040 path->nodes[level] = NULL;
5f39d397 1041 clean_tree_block(trans, root, mid);
925baedd 1042 btrfs_tree_unlock(mid);
bb803951 1043 /* once for the path */
5f39d397 1044 free_extent_buffer(mid);
7bb86316 1045 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
5d4f98a2 1046 0, root->root_key.objectid, level, 1);
bb803951 1047 /* once for the root ptr */
5f39d397 1048 free_extent_buffer(mid);
db94535d 1049 return ret;
bb803951 1050 }
5f39d397 1051 if (btrfs_header_nritems(mid) >
123abc88 1052 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
bb803951
CM
1053 return 0;
1054
a4b6e07d
CM
1055 if (trans->transaction->delayed_refs.flushing &&
1056 btrfs_header_nritems(mid) > 2)
1057 return 0;
1058
5f39d397 1059 if (btrfs_header_nritems(mid) < 2)
54aa1f4d
CM
1060 err_on_enospc = 1;
1061
5f39d397
CM
1062 left = read_node_slot(root, parent, pslot - 1);
1063 if (left) {
925baedd 1064 btrfs_tree_lock(left);
b4ce94de 1065 btrfs_set_lock_blocking(left);
5f39d397 1066 wret = btrfs_cow_block(trans, root, left,
9fa8cfe7 1067 parent, pslot - 1, &left);
54aa1f4d
CM
1068 if (wret) {
1069 ret = wret;
1070 goto enospc;
1071 }
2cc58cf2 1072 }
5f39d397
CM
1073 right = read_node_slot(root, parent, pslot + 1);
1074 if (right) {
925baedd 1075 btrfs_tree_lock(right);
b4ce94de 1076 btrfs_set_lock_blocking(right);
5f39d397 1077 wret = btrfs_cow_block(trans, root, right,
9fa8cfe7 1078 parent, pslot + 1, &right);
2cc58cf2
CM
1079 if (wret) {
1080 ret = wret;
1081 goto enospc;
1082 }
1083 }
1084
1085 /* first, try to make some room in the middle buffer */
5f39d397
CM
1086 if (left) {
1087 orig_slot += btrfs_header_nritems(left);
bce4eae9 1088 wret = push_node_left(trans, root, left, mid, 1);
79f95c82
CM
1089 if (wret < 0)
1090 ret = wret;
5f39d397 1091 if (btrfs_header_nritems(mid) < 2)
54aa1f4d 1092 err_on_enospc = 1;
bb803951 1093 }
79f95c82
CM
1094
1095 /*
1096 * then try to empty the right most buffer into the middle
1097 */
5f39d397 1098 if (right) {
971a1f66 1099 wret = push_node_left(trans, root, mid, right, 1);
54aa1f4d 1100 if (wret < 0 && wret != -ENOSPC)
79f95c82 1101 ret = wret;
5f39d397 1102 if (btrfs_header_nritems(right) == 0) {
db94535d
CM
1103 u64 bytenr = right->start;
1104 u32 blocksize = right->len;
1105
5f39d397 1106 clean_tree_block(trans, root, right);
925baedd 1107 btrfs_tree_unlock(right);
5f39d397 1108 free_extent_buffer(right);
bb803951 1109 right = NULL;
e089f05c
CM
1110 wret = del_ptr(trans, root, path, level + 1, pslot +
1111 1);
bb803951
CM
1112 if (wret)
1113 ret = wret;
db94535d 1114 wret = btrfs_free_extent(trans, root, bytenr,
5d4f98a2
YZ
1115 blocksize, 0,
1116 root->root_key.objectid,
1117 level, 0);
bb803951
CM
1118 if (wret)
1119 ret = wret;
1120 } else {
5f39d397
CM
1121 struct btrfs_disk_key right_key;
1122 btrfs_node_key(right, &right_key, 0);
1123 btrfs_set_node_key(parent, &right_key, pslot + 1);
1124 btrfs_mark_buffer_dirty(parent);
bb803951
CM
1125 }
1126 }
5f39d397 1127 if (btrfs_header_nritems(mid) == 1) {
79f95c82
CM
1128 /*
1129 * we're not allowed to leave a node with one item in the
1130 * tree during a delete. A deletion from lower in the tree
1131 * could try to delete the only pointer in this node.
1132 * So, pull some keys from the left.
1133 * There has to be a left pointer at this point because
1134 * otherwise we would have pulled some pointers from the
1135 * right
1136 */
5f39d397
CM
1137 BUG_ON(!left);
1138 wret = balance_node_right(trans, root, mid, left);
54aa1f4d 1139 if (wret < 0) {
79f95c82 1140 ret = wret;
54aa1f4d
CM
1141 goto enospc;
1142 }
bce4eae9
CM
1143 if (wret == 1) {
1144 wret = push_node_left(trans, root, left, mid, 1);
1145 if (wret < 0)
1146 ret = wret;
1147 }
79f95c82
CM
1148 BUG_ON(wret == 1);
1149 }
5f39d397 1150 if (btrfs_header_nritems(mid) == 0) {
79f95c82 1151 /* we've managed to empty the middle node, drop it */
db94535d
CM
1152 u64 bytenr = mid->start;
1153 u32 blocksize = mid->len;
925baedd 1154
5f39d397 1155 clean_tree_block(trans, root, mid);
925baedd 1156 btrfs_tree_unlock(mid);
5f39d397 1157 free_extent_buffer(mid);
bb803951 1158 mid = NULL;
e089f05c 1159 wret = del_ptr(trans, root, path, level + 1, pslot);
bb803951
CM
1160 if (wret)
1161 ret = wret;
7bb86316 1162 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
5d4f98a2
YZ
1163 0, root->root_key.objectid,
1164 level, 0);
bb803951
CM
1165 if (wret)
1166 ret = wret;
79f95c82
CM
1167 } else {
1168 /* update the parent key to reflect our changes */
5f39d397
CM
1169 struct btrfs_disk_key mid_key;
1170 btrfs_node_key(mid, &mid_key, 0);
1171 btrfs_set_node_key(parent, &mid_key, pslot);
1172 btrfs_mark_buffer_dirty(parent);
79f95c82 1173 }
bb803951 1174
79f95c82 1175 /* update the path */
5f39d397
CM
1176 if (left) {
1177 if (btrfs_header_nritems(left) > orig_slot) {
1178 extent_buffer_get(left);
925baedd 1179 /* left was locked after cow */
5f39d397 1180 path->nodes[level] = left;
bb803951
CM
1181 path->slots[level + 1] -= 1;
1182 path->slots[level] = orig_slot;
925baedd
CM
1183 if (mid) {
1184 btrfs_tree_unlock(mid);
5f39d397 1185 free_extent_buffer(mid);
925baedd 1186 }
bb803951 1187 } else {
5f39d397 1188 orig_slot -= btrfs_header_nritems(left);
bb803951
CM
1189 path->slots[level] = orig_slot;
1190 }
1191 }
79f95c82 1192 /* double check we haven't messed things up */
123abc88 1193 check_block(root, path, level);
e20d96d6 1194 if (orig_ptr !=
5f39d397 1195 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
79f95c82 1196 BUG();
54aa1f4d 1197enospc:
925baedd
CM
1198 if (right) {
1199 btrfs_tree_unlock(right);
5f39d397 1200 free_extent_buffer(right);
925baedd
CM
1201 }
1202 if (left) {
1203 if (path->nodes[level] != left)
1204 btrfs_tree_unlock(left);
5f39d397 1205 free_extent_buffer(left);
925baedd 1206 }
bb803951
CM
1207 return ret;
1208}
1209
d352ac68
CM
1210/* Node balancing for insertion. Here we only split or push nodes around
1211 * when they are completely full. This is also done top down, so we
1212 * have to be pessimistic.
1213 */
d397712b 1214static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
1215 struct btrfs_root *root,
1216 struct btrfs_path *path, int level)
e66f709b 1217{
5f39d397
CM
1218 struct extent_buffer *right = NULL;
1219 struct extent_buffer *mid;
1220 struct extent_buffer *left = NULL;
1221 struct extent_buffer *parent = NULL;
e66f709b
CM
1222 int ret = 0;
1223 int wret;
1224 int pslot;
1225 int orig_slot = path->slots[level];
1226 u64 orig_ptr;
1227
1228 if (level == 0)
1229 return 1;
1230
5f39d397 1231 mid = path->nodes[level];
7bb86316 1232 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b
CM
1233 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1234
1235 if (level < BTRFS_MAX_LEVEL - 1)
5f39d397 1236 parent = path->nodes[level + 1];
e66f709b
CM
1237 pslot = path->slots[level + 1];
1238
5f39d397 1239 if (!parent)
e66f709b 1240 return 1;
e66f709b 1241
5f39d397 1242 left = read_node_slot(root, parent, pslot - 1);
e66f709b
CM
1243
1244 /* first, try to make some room in the middle buffer */
5f39d397 1245 if (left) {
e66f709b 1246 u32 left_nr;
925baedd
CM
1247
1248 btrfs_tree_lock(left);
b4ce94de
CM
1249 btrfs_set_lock_blocking(left);
1250
5f39d397 1251 left_nr = btrfs_header_nritems(left);
33ade1f8
CM
1252 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1253 wret = 1;
1254 } else {
5f39d397 1255 ret = btrfs_cow_block(trans, root, left, parent,
9fa8cfe7 1256 pslot - 1, &left);
54aa1f4d
CM
1257 if (ret)
1258 wret = 1;
1259 else {
54aa1f4d 1260 wret = push_node_left(trans, root,
971a1f66 1261 left, mid, 0);
54aa1f4d 1262 }
33ade1f8 1263 }
e66f709b
CM
1264 if (wret < 0)
1265 ret = wret;
1266 if (wret == 0) {
5f39d397 1267 struct btrfs_disk_key disk_key;
e66f709b 1268 orig_slot += left_nr;
5f39d397
CM
1269 btrfs_node_key(mid, &disk_key, 0);
1270 btrfs_set_node_key(parent, &disk_key, pslot);
1271 btrfs_mark_buffer_dirty(parent);
1272 if (btrfs_header_nritems(left) > orig_slot) {
1273 path->nodes[level] = left;
e66f709b
CM
1274 path->slots[level + 1] -= 1;
1275 path->slots[level] = orig_slot;
925baedd 1276 btrfs_tree_unlock(mid);
5f39d397 1277 free_extent_buffer(mid);
e66f709b
CM
1278 } else {
1279 orig_slot -=
5f39d397 1280 btrfs_header_nritems(left);
e66f709b 1281 path->slots[level] = orig_slot;
925baedd 1282 btrfs_tree_unlock(left);
5f39d397 1283 free_extent_buffer(left);
e66f709b 1284 }
e66f709b
CM
1285 return 0;
1286 }
925baedd 1287 btrfs_tree_unlock(left);
5f39d397 1288 free_extent_buffer(left);
e66f709b 1289 }
925baedd 1290 right = read_node_slot(root, parent, pslot + 1);
e66f709b
CM
1291
1292 /*
1293 * then try to empty the right most buffer into the middle
1294 */
5f39d397 1295 if (right) {
33ade1f8 1296 u32 right_nr;
b4ce94de 1297
925baedd 1298 btrfs_tree_lock(right);
b4ce94de
CM
1299 btrfs_set_lock_blocking(right);
1300
5f39d397 1301 right_nr = btrfs_header_nritems(right);
33ade1f8
CM
1302 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1303 wret = 1;
1304 } else {
5f39d397
CM
1305 ret = btrfs_cow_block(trans, root, right,
1306 parent, pslot + 1,
9fa8cfe7 1307 &right);
54aa1f4d
CM
1308 if (ret)
1309 wret = 1;
1310 else {
54aa1f4d 1311 wret = balance_node_right(trans, root,
5f39d397 1312 right, mid);
54aa1f4d 1313 }
33ade1f8 1314 }
e66f709b
CM
1315 if (wret < 0)
1316 ret = wret;
1317 if (wret == 0) {
5f39d397
CM
1318 struct btrfs_disk_key disk_key;
1319
1320 btrfs_node_key(right, &disk_key, 0);
1321 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1322 btrfs_mark_buffer_dirty(parent);
1323
1324 if (btrfs_header_nritems(mid) <= orig_slot) {
1325 path->nodes[level] = right;
e66f709b
CM
1326 path->slots[level + 1] += 1;
1327 path->slots[level] = orig_slot -
5f39d397 1328 btrfs_header_nritems(mid);
925baedd 1329 btrfs_tree_unlock(mid);
5f39d397 1330 free_extent_buffer(mid);
e66f709b 1331 } else {
925baedd 1332 btrfs_tree_unlock(right);
5f39d397 1333 free_extent_buffer(right);
e66f709b 1334 }
e66f709b
CM
1335 return 0;
1336 }
925baedd 1337 btrfs_tree_unlock(right);
5f39d397 1338 free_extent_buffer(right);
e66f709b 1339 }
e66f709b
CM
1340 return 1;
1341}
1342
3c69faec 1343/*
d352ac68
CM
1344 * readahead one full node of leaves, finding things that are close
1345 * to the block in 'slot', and triggering ra on them.
3c69faec 1346 */
c8c42864
CM
1347static void reada_for_search(struct btrfs_root *root,
1348 struct btrfs_path *path,
1349 int level, int slot, u64 objectid)
3c69faec 1350{
5f39d397 1351 struct extent_buffer *node;
01f46658 1352 struct btrfs_disk_key disk_key;
3c69faec 1353 u32 nritems;
3c69faec 1354 u64 search;
a7175319 1355 u64 target;
6b80053d 1356 u64 nread = 0;
3c69faec 1357 int direction = path->reada;
5f39d397 1358 struct extent_buffer *eb;
6b80053d
CM
1359 u32 nr;
1360 u32 blocksize;
1361 u32 nscan = 0;
db94535d 1362
a6b6e75e 1363 if (level != 1)
6702ed49
CM
1364 return;
1365
1366 if (!path->nodes[level])
3c69faec
CM
1367 return;
1368
5f39d397 1369 node = path->nodes[level];
925baedd 1370
3c69faec 1371 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
1372 blocksize = btrfs_level_size(root, level - 1);
1373 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
1374 if (eb) {
1375 free_extent_buffer(eb);
3c69faec
CM
1376 return;
1377 }
1378
a7175319 1379 target = search;
6b80053d 1380
5f39d397 1381 nritems = btrfs_header_nritems(node);
6b80053d 1382 nr = slot;
d397712b 1383 while (1) {
6b80053d
CM
1384 if (direction < 0) {
1385 if (nr == 0)
1386 break;
1387 nr--;
1388 } else if (direction > 0) {
1389 nr++;
1390 if (nr >= nritems)
1391 break;
3c69faec 1392 }
01f46658
CM
1393 if (path->reada < 0 && objectid) {
1394 btrfs_node_key(node, &disk_key, nr);
1395 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1396 break;
1397 }
6b80053d 1398 search = btrfs_node_blockptr(node, nr);
a7175319
CM
1399 if ((search <= target && target - search <= 65536) ||
1400 (search > target && search - target <= 65536)) {
ca7a79ad
CM
1401 readahead_tree_block(root, search, blocksize,
1402 btrfs_node_ptr_generation(node, nr));
6b80053d
CM
1403 nread += blocksize;
1404 }
1405 nscan++;
a7175319 1406 if ((nread > 65536 || nscan > 32))
6b80053d 1407 break;
3c69faec
CM
1408 }
1409}
925baedd 1410
b4ce94de
CM
1411/*
1412 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1413 * cache
1414 */
1415static noinline int reada_for_balance(struct btrfs_root *root,
1416 struct btrfs_path *path, int level)
1417{
1418 int slot;
1419 int nritems;
1420 struct extent_buffer *parent;
1421 struct extent_buffer *eb;
1422 u64 gen;
1423 u64 block1 = 0;
1424 u64 block2 = 0;
1425 int ret = 0;
1426 int blocksize;
1427
8c594ea8 1428 parent = path->nodes[level + 1];
b4ce94de
CM
1429 if (!parent)
1430 return 0;
1431
1432 nritems = btrfs_header_nritems(parent);
8c594ea8 1433 slot = path->slots[level + 1];
b4ce94de
CM
1434 blocksize = btrfs_level_size(root, level);
1435
1436 if (slot > 0) {
1437 block1 = btrfs_node_blockptr(parent, slot - 1);
1438 gen = btrfs_node_ptr_generation(parent, slot - 1);
1439 eb = btrfs_find_tree_block(root, block1, blocksize);
1440 if (eb && btrfs_buffer_uptodate(eb, gen))
1441 block1 = 0;
1442 free_extent_buffer(eb);
1443 }
8c594ea8 1444 if (slot + 1 < nritems) {
b4ce94de
CM
1445 block2 = btrfs_node_blockptr(parent, slot + 1);
1446 gen = btrfs_node_ptr_generation(parent, slot + 1);
1447 eb = btrfs_find_tree_block(root, block2, blocksize);
1448 if (eb && btrfs_buffer_uptodate(eb, gen))
1449 block2 = 0;
1450 free_extent_buffer(eb);
1451 }
1452 if (block1 || block2) {
1453 ret = -EAGAIN;
8c594ea8
CM
1454
1455 /* release the whole path */
b4ce94de 1456 btrfs_release_path(root, path);
8c594ea8
CM
1457
1458 /* read the blocks */
b4ce94de
CM
1459 if (block1)
1460 readahead_tree_block(root, block1, blocksize, 0);
1461 if (block2)
1462 readahead_tree_block(root, block2, blocksize, 0);
1463
1464 if (block1) {
1465 eb = read_tree_block(root, block1, blocksize, 0);
1466 free_extent_buffer(eb);
1467 }
8c594ea8 1468 if (block2) {
b4ce94de
CM
1469 eb = read_tree_block(root, block2, blocksize, 0);
1470 free_extent_buffer(eb);
1471 }
1472 }
1473 return ret;
1474}
1475
1476
d352ac68 1477/*
d397712b
CM
1478 * when we walk down the tree, it is usually safe to unlock the higher layers
1479 * in the tree. The exceptions are when our path goes through slot 0, because
1480 * operations on the tree might require changing key pointers higher up in the
1481 * tree.
d352ac68 1482 *
d397712b
CM
1483 * callers might also have set path->keep_locks, which tells this code to keep
1484 * the lock if the path points to the last slot in the block. This is part of
1485 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 1486 *
d397712b
CM
1487 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1488 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 1489 */
e02119d5
CM
1490static noinline void unlock_up(struct btrfs_path *path, int level,
1491 int lowest_unlock)
925baedd
CM
1492{
1493 int i;
1494 int skip_level = level;
051e1b9f 1495 int no_skips = 0;
925baedd
CM
1496 struct extent_buffer *t;
1497
1498 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1499 if (!path->nodes[i])
1500 break;
1501 if (!path->locks[i])
1502 break;
051e1b9f 1503 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
1504 skip_level = i + 1;
1505 continue;
1506 }
051e1b9f 1507 if (!no_skips && path->keep_locks) {
925baedd
CM
1508 u32 nritems;
1509 t = path->nodes[i];
1510 nritems = btrfs_header_nritems(t);
051e1b9f 1511 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
1512 skip_level = i + 1;
1513 continue;
1514 }
1515 }
051e1b9f
CM
1516 if (skip_level < i && i >= lowest_unlock)
1517 no_skips = 1;
1518
925baedd
CM
1519 t = path->nodes[i];
1520 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1521 btrfs_tree_unlock(t);
1522 path->locks[i] = 0;
1523 }
1524 }
1525}
1526
b4ce94de
CM
1527/*
1528 * This releases any locks held in the path starting at level and
1529 * going all the way up to the root.
1530 *
1531 * btrfs_search_slot will keep the lock held on higher nodes in a few
1532 * corner cases, such as COW of the block at slot zero in the node. This
1533 * ignores those rules, and it should only be called when there are no
1534 * more updates to be done higher up in the tree.
1535 */
1536noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1537{
1538 int i;
1539
5d4f98a2 1540 if (path->keep_locks)
b4ce94de
CM
1541 return;
1542
1543 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1544 if (!path->nodes[i])
12f4dacc 1545 continue;
b4ce94de 1546 if (!path->locks[i])
12f4dacc 1547 continue;
b4ce94de
CM
1548 btrfs_tree_unlock(path->nodes[i]);
1549 path->locks[i] = 0;
1550 }
1551}
1552
c8c42864
CM
1553/*
1554 * helper function for btrfs_search_slot. The goal is to find a block
1555 * in cache without setting the path to blocking. If we find the block
1556 * we return zero and the path is unchanged.
1557 *
1558 * If we can't find the block, we set the path blocking and do some
1559 * reada. -EAGAIN is returned and the search must be repeated.
1560 */
1561static int
1562read_block_for_search(struct btrfs_trans_handle *trans,
1563 struct btrfs_root *root, struct btrfs_path *p,
1564 struct extent_buffer **eb_ret, int level, int slot,
1565 struct btrfs_key *key)
1566{
1567 u64 blocknr;
1568 u64 gen;
1569 u32 blocksize;
1570 struct extent_buffer *b = *eb_ret;
1571 struct extent_buffer *tmp;
76a05b35 1572 int ret;
c8c42864
CM
1573
1574 blocknr = btrfs_node_blockptr(b, slot);
1575 gen = btrfs_node_ptr_generation(b, slot);
1576 blocksize = btrfs_level_size(root, level - 1);
1577
1578 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1579 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
76a05b35
CM
1580 /*
1581 * we found an up to date block without sleeping, return
1582 * right away
1583 */
c8c42864
CM
1584 *eb_ret = tmp;
1585 return 0;
1586 }
1587
1588 /*
1589 * reduce lock contention at high levels
1590 * of the btree by dropping locks before
76a05b35
CM
1591 * we read. Don't release the lock on the current
1592 * level because we need to walk this node to figure
1593 * out which blocks to read.
c8c42864 1594 */
8c594ea8
CM
1595 btrfs_unlock_up_safe(p, level + 1);
1596 btrfs_set_path_blocking(p);
1597
c8c42864
CM
1598 if (tmp)
1599 free_extent_buffer(tmp);
1600 if (p->reada)
1601 reada_for_search(root, p, level, slot, key->objectid);
1602
8c594ea8 1603 btrfs_release_path(NULL, p);
76a05b35
CM
1604
1605 ret = -EAGAIN;
c8c42864 1606 tmp = read_tree_block(root, blocknr, blocksize, gen);
76a05b35
CM
1607 if (tmp) {
1608 /*
1609 * If the read above didn't mark this buffer up to date,
1610 * it will never end up being up to date. Set ret to EIO now
1611 * and give up so that our caller doesn't loop forever
1612 * on our EAGAINs.
1613 */
1614 if (!btrfs_buffer_uptodate(tmp, 0))
1615 ret = -EIO;
c8c42864 1616 free_extent_buffer(tmp);
76a05b35
CM
1617 }
1618 return ret;
c8c42864
CM
1619}
1620
1621/*
1622 * helper function for btrfs_search_slot. This does all of the checks
1623 * for node-level blocks and does any balancing required based on
1624 * the ins_len.
1625 *
1626 * If no extra work was required, zero is returned. If we had to
1627 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1628 * start over
1629 */
1630static int
1631setup_nodes_for_search(struct btrfs_trans_handle *trans,
1632 struct btrfs_root *root, struct btrfs_path *p,
1633 struct extent_buffer *b, int level, int ins_len)
1634{
1635 int ret;
1636 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1637 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1638 int sret;
1639
1640 sret = reada_for_balance(root, p, level);
1641 if (sret)
1642 goto again;
1643
1644 btrfs_set_path_blocking(p);
1645 sret = split_node(trans, root, p, level);
1646 btrfs_clear_path_blocking(p, NULL);
1647
1648 BUG_ON(sret > 0);
1649 if (sret) {
1650 ret = sret;
1651 goto done;
1652 }
1653 b = p->nodes[level];
1654 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1655 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) {
1656 int sret;
1657
1658 sret = reada_for_balance(root, p, level);
1659 if (sret)
1660 goto again;
1661
1662 btrfs_set_path_blocking(p);
1663 sret = balance_level(trans, root, p, level);
1664 btrfs_clear_path_blocking(p, NULL);
1665
1666 if (sret) {
1667 ret = sret;
1668 goto done;
1669 }
1670 b = p->nodes[level];
1671 if (!b) {
1672 btrfs_release_path(NULL, p);
1673 goto again;
1674 }
1675 BUG_ON(btrfs_header_nritems(b) == 1);
1676 }
1677 return 0;
1678
1679again:
1680 ret = -EAGAIN;
1681done:
1682 return ret;
1683}
1684
74123bd7
CM
1685/*
1686 * look for key in the tree. path is filled in with nodes along the way
1687 * if key is found, we return zero and you can find the item in the leaf
1688 * level of the path (level 0)
1689 *
1690 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
1691 * be inserted, and 1 is returned. If there are other errors during the
1692 * search a negative error number is returned.
97571fd0
CM
1693 *
1694 * if ins_len > 0, nodes and leaves will be split as we walk down the
1695 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1696 * possible)
74123bd7 1697 */
e089f05c
CM
1698int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1699 *root, struct btrfs_key *key, struct btrfs_path *p, int
1700 ins_len, int cow)
be0e5c09 1701{
5f39d397 1702 struct extent_buffer *b;
be0e5c09
CM
1703 int slot;
1704 int ret;
1705 int level;
925baedd 1706 int lowest_unlock = 1;
9f3a7427
CM
1707 u8 lowest_level = 0;
1708
6702ed49 1709 lowest_level = p->lowest_level;
323ac95b 1710 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 1711 WARN_ON(p->nodes[0] != NULL);
25179201 1712
925baedd
CM
1713 if (ins_len < 0)
1714 lowest_unlock = 2;
65b51a00 1715
bb803951 1716again:
5d4f98a2
YZ
1717 if (p->search_commit_root) {
1718 b = root->commit_root;
1719 extent_buffer_get(b);
1720 if (!p->skip_locking)
1721 btrfs_tree_lock(b);
1722 } else {
1723 if (p->skip_locking)
1724 b = btrfs_root_node(root);
1725 else
1726 b = btrfs_lock_root_node(root);
1727 }
925baedd 1728
eb60ceac 1729 while (b) {
5f39d397 1730 level = btrfs_header_level(b);
65b51a00
CM
1731
1732 /*
1733 * setup the path here so we can release it under lock
1734 * contention with the cow code
1735 */
1736 p->nodes[level] = b;
1737 if (!p->skip_locking)
1738 p->locks[level] = 1;
1739
02217ed2
CM
1740 if (cow) {
1741 int wret;
65b51a00 1742
c8c42864
CM
1743 /*
1744 * if we don't really need to cow this block
1745 * then we don't want to set the path blocking,
1746 * so we test it here
1747 */
5d4f98a2 1748 if (!should_cow_block(trans, root, b))
65b51a00 1749 goto cow_done;
5d4f98a2 1750
b4ce94de
CM
1751 btrfs_set_path_blocking(p);
1752
e20d96d6
CM
1753 wret = btrfs_cow_block(trans, root, b,
1754 p->nodes[level + 1],
9fa8cfe7 1755 p->slots[level + 1], &b);
54aa1f4d 1756 if (wret) {
5f39d397 1757 free_extent_buffer(b);
65b51a00
CM
1758 ret = wret;
1759 goto done;
54aa1f4d 1760 }
02217ed2 1761 }
65b51a00 1762cow_done:
02217ed2 1763 BUG_ON(!cow && ins_len);
5f39d397 1764 if (level != btrfs_header_level(b))
2c90e5d6 1765 WARN_ON(1);
5f39d397 1766 level = btrfs_header_level(b);
65b51a00 1767
eb60ceac 1768 p->nodes[level] = b;
5cd57b2c
CM
1769 if (!p->skip_locking)
1770 p->locks[level] = 1;
65b51a00 1771
4008c04a 1772 btrfs_clear_path_blocking(p, NULL);
b4ce94de
CM
1773
1774 /*
1775 * we have a lock on b and as long as we aren't changing
 1776 * the tree, there is no way for the items in b to change.
1777 * It is safe to drop the lock on our parent before we
1778 * go through the expensive btree search on b.
1779 *
1780 * If cow is true, then we might be changing slot zero,
1781 * which may require changing the parent. So, we can't
1782 * drop the lock until after we know which slot we're
1783 * operating on.
1784 */
1785 if (!cow)
1786 btrfs_unlock_up_safe(p, level + 1);
1787
123abc88 1788 ret = check_block(root, p, level);
65b51a00
CM
1789 if (ret) {
1790 ret = -1;
1791 goto done;
1792 }
925baedd 1793
5f39d397 1794 ret = bin_search(b, key, level, &slot);
b4ce94de 1795
5f39d397 1796 if (level != 0) {
be0e5c09
CM
1797 if (ret && slot > 0)
1798 slot -= 1;
1799 p->slots[level] = slot;
c8c42864
CM
1800 ret = setup_nodes_for_search(trans, root, p, b, level,
1801 ins_len);
1802 if (ret == -EAGAIN)
1803 goto again;
1804 else if (ret)
1805 goto done;
1806 b = p->nodes[level];
1807 slot = p->slots[level];
b4ce94de 1808
f9efa9c7
CM
1809 unlock_up(p, level, lowest_unlock);
1810
9f3a7427 1811 /* this is only true while dropping a snapshot */
925baedd 1812 if (level == lowest_level) {
5b21f2ed
ZY
1813 ret = 0;
1814 goto done;
925baedd 1815 }
ca7a79ad 1816
c8c42864
CM
1817 ret = read_block_for_search(trans, root, p,
1818 &b, level, slot, key);
1819 if (ret == -EAGAIN)
1820 goto again;
594a24eb 1821
76a05b35
CM
1822 if (ret == -EIO)
1823 goto done;
1824
b4ce94de
CM
1825 if (!p->skip_locking) {
1826 int lret;
1827
4008c04a 1828 btrfs_clear_path_blocking(p, NULL);
b4ce94de
CM
1829 lret = btrfs_try_spin_lock(b);
1830
1831 if (!lret) {
1832 btrfs_set_path_blocking(p);
1833 btrfs_tree_lock(b);
4008c04a 1834 btrfs_clear_path_blocking(p, b);
b4ce94de
CM
1835 }
1836 }
be0e5c09
CM
1837 } else {
1838 p->slots[level] = slot;
87b29b20
YZ
1839 if (ins_len > 0 &&
1840 btrfs_leaf_free_space(root, b) < ins_len) {
b4ce94de
CM
1841 int sret;
1842
1843 btrfs_set_path_blocking(p);
1844 sret = split_leaf(trans, root, key,
cc0c5538 1845 p, ins_len, ret == 0);
4008c04a 1846 btrfs_clear_path_blocking(p, NULL);
b4ce94de 1847
5c680ed6 1848 BUG_ON(sret > 0);
65b51a00
CM
1849 if (sret) {
1850 ret = sret;
1851 goto done;
1852 }
5c680ed6 1853 }
459931ec
CM
1854 if (!p->search_for_split)
1855 unlock_up(p, level, lowest_unlock);
65b51a00 1856 goto done;
be0e5c09
CM
1857 }
1858 }
65b51a00
CM
1859 ret = 1;
1860done:
b4ce94de
CM
1861 /*
1862 * we don't really know what they plan on doing with the path
1863 * from here on, so for now just mark it as blocking
1864 */
b9473439
CM
1865 if (!p->leave_spinning)
1866 btrfs_set_path_blocking(p);
76a05b35
CM
1867 if (ret < 0)
1868 btrfs_release_path(root, p);
65b51a00 1869 return ret;
be0e5c09
CM
1870}
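/*
 * A minimal read-only lookup sketch using btrfs_search_slot() as described
 * above; the key values and the objectid variable are illustrative only.
 * With ins_len == 0 and cow == 0 nothing in the tree is modified, so a NULL
 * transaction handle is allowed:
 *
 *	struct btrfs_inode_item *ii;
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *	btrfs_free_path(path);
 *
 * A return of 1 means the key is not present and path->slots[0] is the slot
 * where it would be inserted; negative returns are errors.
 */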
1871
74123bd7
CM
1872/*
1873 * adjust the pointers going up the tree, starting at level
 1874 * making sure the right key of each node points to 'key'.
1875 * This is used after shifting pointers to the left, so it stops
1876 * fixing up pointers when a given leaf/node is not in slot 0 of the
1877 * higher levels
aa5d6bed
CM
1878 *
1879 * If this fails to write a tree block, it returns -1, but continues
1880 * fixing up the blocks in ram so the tree is consistent.
74123bd7 1881 */
5f39d397
CM
1882static int fixup_low_keys(struct btrfs_trans_handle *trans,
1883 struct btrfs_root *root, struct btrfs_path *path,
1884 struct btrfs_disk_key *key, int level)
be0e5c09
CM
1885{
1886 int i;
aa5d6bed 1887 int ret = 0;
5f39d397
CM
1888 struct extent_buffer *t;
1889
234b63a0 1890 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 1891 int tslot = path->slots[i];
eb60ceac 1892 if (!path->nodes[i])
be0e5c09 1893 break;
5f39d397
CM
1894 t = path->nodes[i];
1895 btrfs_set_node_key(t, key, tslot);
d6025579 1896 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
1897 if (tslot != 0)
1898 break;
1899 }
aa5d6bed 1900 return ret;
be0e5c09
CM
1901}
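/*
 * A quick walk-through of the early exit above: a key stored in a parent
 * node describes the smallest key reachable through that pointer, so the
 * new low key only has to propagate upward while the child sits in slot 0
 * of its parent. Once some level has tslot != 0 the keys above it were
 * never derived from this child and the loop can stop.
 */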
1902
31840ae1
ZY
1903/*
1904 * update item key.
1905 *
1906 * This function isn't completely safe. It's the caller's responsibility
1907 * that the new key won't break the order
1908 */
1909int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1910 struct btrfs_root *root, struct btrfs_path *path,
1911 struct btrfs_key *new_key)
1912{
1913 struct btrfs_disk_key disk_key;
1914 struct extent_buffer *eb;
1915 int slot;
1916
1917 eb = path->nodes[0];
1918 slot = path->slots[0];
1919 if (slot > 0) {
1920 btrfs_item_key(eb, &disk_key, slot - 1);
1921 if (comp_keys(&disk_key, new_key) >= 0)
1922 return -1;
1923 }
1924 if (slot < btrfs_header_nritems(eb) - 1) {
1925 btrfs_item_key(eb, &disk_key, slot + 1);
1926 if (comp_keys(&disk_key, new_key) <= 0)
1927 return -1;
1928 }
1929
1930 btrfs_cpu_key_to_disk(&disk_key, new_key);
1931 btrfs_set_item_key(eb, &disk_key, slot);
1932 btrfs_mark_buffer_dirty(eb);
1933 if (slot == 0)
1934 fixup_low_keys(trans, root, path, &disk_key, 1);
1935 return 0;
1936}
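/*
 * A sketch of the ordering rule the checks above enforce: with the
 * neighbouring keys in this leaf unchanged, the replacement key must still
 * sort strictly between them,
 *
 *	key[slot - 1] < new_key < key[slot + 1]
 *
 * otherwise -1 is returned and the item is untouched. Keys in other leaves
 * are never examined, which is why the comment above leaves the final
 * responsibility for ordering with the caller.
 */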
1937
74123bd7
CM
1938/*
1939 * try to push data from one node into the next node left in the
79f95c82 1940 * tree.
aa5d6bed
CM
1941 *
1942 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1943 * error, and > 0 if there was no room in the left hand block.
74123bd7 1944 */
98ed5174
CM
1945static int push_node_left(struct btrfs_trans_handle *trans,
1946 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 1947 struct extent_buffer *src, int empty)
be0e5c09 1948{
be0e5c09 1949 int push_items = 0;
bb803951
CM
1950 int src_nritems;
1951 int dst_nritems;
aa5d6bed 1952 int ret = 0;
be0e5c09 1953
5f39d397
CM
1954 src_nritems = btrfs_header_nritems(src);
1955 dst_nritems = btrfs_header_nritems(dst);
123abc88 1956 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
1957 WARN_ON(btrfs_header_generation(src) != trans->transid);
1958 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 1959
bce4eae9 1960 if (!empty && src_nritems <= 8)
971a1f66
CM
1961 return 1;
1962
d397712b 1963 if (push_items <= 0)
be0e5c09
CM
1964 return 1;
1965
bce4eae9 1966 if (empty) {
971a1f66 1967 push_items = min(src_nritems, push_items);
bce4eae9
CM
1968 if (push_items < src_nritems) {
1969 /* leave at least 8 pointers in the node if
1970 * we aren't going to empty it
1971 */
1972 if (src_nritems - push_items < 8) {
1973 if (push_items <= 8)
1974 return 1;
1975 push_items -= 8;
1976 }
1977 }
1978 } else
1979 push_items = min(src_nritems - 8, push_items);
79f95c82 1980
5f39d397
CM
1981 copy_extent_buffer(dst, src,
1982 btrfs_node_key_ptr_offset(dst_nritems),
1983 btrfs_node_key_ptr_offset(0),
d397712b 1984 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 1985
bb803951 1986 if (push_items < src_nritems) {
5f39d397
CM
1987 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1988 btrfs_node_key_ptr_offset(push_items),
1989 (src_nritems - push_items) *
1990 sizeof(struct btrfs_key_ptr));
1991 }
1992 btrfs_set_header_nritems(src, src_nritems - push_items);
1993 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1994 btrfs_mark_buffer_dirty(src);
1995 btrfs_mark_buffer_dirty(dst);
31840ae1 1996
79f95c82
CM
1997 return ret;
1998}
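/*
 * A worked example of the push accounting above, assuming a 4K nodesize
 * where BTRFS_NODEPTRS_PER_BLOCK(root) is 121: with dst_nritems == 100 and
 * src_nritems == 50 the initial push_items is 121 - 100 = 21. Since
 * empty == 0, at least 8 pointers have to stay behind in src, so
 * push_items = min(50 - 8, 21) = 21, leaving src with 29 pointers and
 * filling dst to 121.
 */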
1999
2000/*
2001 * try to push data from one node into the next node right in the
2002 * tree.
2003 *
2004 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2005 * error, and > 0 if there was no room in the right hand block.
2006 *
2007 * this will only push up to 1/2 the contents of the left node over
2008 */
5f39d397
CM
2009static int balance_node_right(struct btrfs_trans_handle *trans,
2010 struct btrfs_root *root,
2011 struct extent_buffer *dst,
2012 struct extent_buffer *src)
79f95c82 2013{
79f95c82
CM
2014 int push_items = 0;
2015 int max_push;
2016 int src_nritems;
2017 int dst_nritems;
2018 int ret = 0;
79f95c82 2019
7bb86316
CM
2020 WARN_ON(btrfs_header_generation(src) != trans->transid);
2021 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2022
5f39d397
CM
2023 src_nritems = btrfs_header_nritems(src);
2024 dst_nritems = btrfs_header_nritems(dst);
123abc88 2025 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 2026 if (push_items <= 0)
79f95c82 2027 return 1;
bce4eae9 2028
d397712b 2029 if (src_nritems < 4)
bce4eae9 2030 return 1;
79f95c82
CM
2031
2032 max_push = src_nritems / 2 + 1;
2033 /* don't try to empty the node */
d397712b 2034 if (max_push >= src_nritems)
79f95c82 2035 return 1;
252c38f0 2036
79f95c82
CM
2037 if (max_push < push_items)
2038 push_items = max_push;
2039
5f39d397
CM
2040 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2041 btrfs_node_key_ptr_offset(0),
2042 (dst_nritems) *
2043 sizeof(struct btrfs_key_ptr));
d6025579 2044
5f39d397
CM
2045 copy_extent_buffer(dst, src,
2046 btrfs_node_key_ptr_offset(0),
2047 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 2048 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 2049
5f39d397
CM
2050 btrfs_set_header_nritems(src, src_nritems - push_items);
2051 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 2052
5f39d397
CM
2053 btrfs_mark_buffer_dirty(src);
2054 btrfs_mark_buffer_dirty(dst);
31840ae1 2055
aa5d6bed 2056 return ret;
be0e5c09
CM
2057}
2058
97571fd0
CM
2059/*
2060 * helper function to insert a new root level in the tree.
2061 * A new node is allocated, and a single item is inserted to
2062 * point to the existing root
aa5d6bed
CM
2063 *
2064 * returns zero on success or < 0 on failure.
97571fd0 2065 */
d397712b 2066static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
2067 struct btrfs_root *root,
2068 struct btrfs_path *path, int level)
5c680ed6 2069{
7bb86316 2070 u64 lower_gen;
5f39d397
CM
2071 struct extent_buffer *lower;
2072 struct extent_buffer *c;
925baedd 2073 struct extent_buffer *old;
5f39d397 2074 struct btrfs_disk_key lower_key;
5c680ed6
CM
2075
2076 BUG_ON(path->nodes[level]);
2077 BUG_ON(path->nodes[level-1] != root->node);
2078
7bb86316
CM
2079 lower = path->nodes[level-1];
2080 if (level == 1)
2081 btrfs_item_key(lower, &lower_key, 0);
2082 else
2083 btrfs_node_key(lower, &lower_key, 0);
2084
31840ae1 2085 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 2086 root->root_key.objectid, &lower_key,
ad3d81ba 2087 level, root->node->start, 0);
5f39d397
CM
2088 if (IS_ERR(c))
2089 return PTR_ERR(c);
925baedd 2090
5d4f98a2 2091 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
2092 btrfs_set_header_nritems(c, 1);
2093 btrfs_set_header_level(c, level);
db94535d 2094 btrfs_set_header_bytenr(c, c->start);
5f39d397 2095 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 2096 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 2097 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
2098
2099 write_extent_buffer(c, root->fs_info->fsid,
2100 (unsigned long)btrfs_header_fsid(c),
2101 BTRFS_FSID_SIZE);
e17cade2
CM
2102
2103 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2104 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2105 BTRFS_UUID_SIZE);
2106
5f39d397 2107 btrfs_set_node_key(c, &lower_key, 0);
db94535d 2108 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 2109 lower_gen = btrfs_header_generation(lower);
31840ae1 2110 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
2111
2112 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 2113
5f39d397 2114 btrfs_mark_buffer_dirty(c);
d5719762 2115
925baedd
CM
2116 spin_lock(&root->node_lock);
2117 old = root->node;
5f39d397 2118 root->node = c;
925baedd
CM
2119 spin_unlock(&root->node_lock);
2120
2121 /* the super has an extra ref to root->node */
2122 free_extent_buffer(old);
2123
0b86a832 2124 add_root_to_dirty_list(root);
5f39d397
CM
2125 extent_buffer_get(c);
2126 path->nodes[level] = c;
925baedd 2127 path->locks[level] = 1;
5c680ed6
CM
2128 path->slots[level] = 0;
2129 return 0;
2130}
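/*
 * A before/after sketch of the root growth above, for a tree whose root was
 * the block L that just ran out of room:
 *
 *	before:	root->node -> L
 *	after:	root->node -> C -> L	(C holds a single pointer)
 *
 * The one key in C is copied from the first key of L, so every existing key
 * is still found along the same path; the caller now has a parent node with
 * room for the extra pointer the upcoming split will produce.
 */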
2131
74123bd7
CM
2132/*
2133 * worker function to insert a single pointer in a node.
2134 * the node should have enough room for the pointer already
97571fd0 2135 *
74123bd7
CM
2136 * slot and level indicate where you want the key to go, and
 2137 * bytenr is the block the key points to.
aa5d6bed
CM
2138 *
2139 * returns zero on success and < 0 on any error
74123bd7 2140 */
e089f05c
CM
2141static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2142 *root, struct btrfs_path *path, struct btrfs_disk_key
db94535d 2143 *key, u64 bytenr, int slot, int level)
74123bd7 2144{
5f39d397 2145 struct extent_buffer *lower;
74123bd7 2146 int nritems;
5c680ed6
CM
2147
2148 BUG_ON(!path->nodes[level]);
5f39d397
CM
2149 lower = path->nodes[level];
2150 nritems = btrfs_header_nritems(lower);
c293498b 2151 BUG_ON(slot > nritems);
123abc88 2152 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
74123bd7
CM
2153 BUG();
2154 if (slot != nritems) {
5f39d397
CM
2155 memmove_extent_buffer(lower,
2156 btrfs_node_key_ptr_offset(slot + 1),
2157 btrfs_node_key_ptr_offset(slot),
d6025579 2158 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 2159 }
5f39d397 2160 btrfs_set_node_key(lower, key, slot);
db94535d 2161 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
2162 WARN_ON(trans->transid == 0);
2163 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
2164 btrfs_set_header_nritems(lower, nritems + 1);
2165 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
2166 return 0;
2167}
2168
97571fd0
CM
2169/*
2170 * split the node at the specified level in path in two.
2171 * The path is corrected to point to the appropriate node after the split
2172 *
2173 * Before splitting this tries to make some room in the node by pushing
2174 * left and right, if either one works, it returns right away.
aa5d6bed
CM
2175 *
2176 * returns 0 on success and < 0 on failure
97571fd0 2177 */
e02119d5
CM
2178static noinline int split_node(struct btrfs_trans_handle *trans,
2179 struct btrfs_root *root,
2180 struct btrfs_path *path, int level)
be0e5c09 2181{
5f39d397
CM
2182 struct extent_buffer *c;
2183 struct extent_buffer *split;
2184 struct btrfs_disk_key disk_key;
be0e5c09 2185 int mid;
5c680ed6 2186 int ret;
aa5d6bed 2187 int wret;
7518a238 2188 u32 c_nritems;
eb60ceac 2189
5f39d397 2190 c = path->nodes[level];
7bb86316 2191 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 2192 if (c == root->node) {
5c680ed6 2193 /* trying to split the root, lets make a new one */
e089f05c 2194 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
2195 if (ret)
2196 return ret;
a4b6e07d 2197 } else if (!trans->transaction->delayed_refs.flushing) {
e66f709b 2198 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
2199 c = path->nodes[level];
2200 if (!ret && btrfs_header_nritems(c) <
c448acf0 2201 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 2202 return 0;
54aa1f4d
CM
2203 if (ret < 0)
2204 return ret;
be0e5c09 2205 }
e66f709b 2206
5f39d397 2207 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
2208 mid = (c_nritems + 1) / 2;
2209 btrfs_node_key(c, &disk_key, mid);
7bb86316 2210
5d4f98a2 2211 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 2212 root->root_key.objectid,
5d4f98a2 2213 &disk_key, level, c->start, 0);
5f39d397
CM
2214 if (IS_ERR(split))
2215 return PTR_ERR(split);
2216
5d4f98a2 2217 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 2218 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 2219 btrfs_set_header_bytenr(split, split->start);
5f39d397 2220 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 2221 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2222 btrfs_set_header_owner(split, root->root_key.objectid);
2223 write_extent_buffer(split, root->fs_info->fsid,
2224 (unsigned long)btrfs_header_fsid(split),
2225 BTRFS_FSID_SIZE);
e17cade2
CM
2226 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2227 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2228 BTRFS_UUID_SIZE);
54aa1f4d 2229
5f39d397
CM
2230
2231 copy_extent_buffer(split, c,
2232 btrfs_node_key_ptr_offset(0),
2233 btrfs_node_key_ptr_offset(mid),
2234 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2235 btrfs_set_header_nritems(split, c_nritems - mid);
2236 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
2237 ret = 0;
2238
5f39d397
CM
2239 btrfs_mark_buffer_dirty(c);
2240 btrfs_mark_buffer_dirty(split);
2241
db94535d 2242 wret = insert_ptr(trans, root, path, &disk_key, split->start,
5f39d397 2243 path->slots[level + 1] + 1,
123abc88 2244 level + 1);
aa5d6bed
CM
2245 if (wret)
2246 ret = wret;
2247
5de08d7d 2248 if (path->slots[level] >= mid) {
5c680ed6 2249 path->slots[level] -= mid;
925baedd 2250 btrfs_tree_unlock(c);
5f39d397
CM
2251 free_extent_buffer(c);
2252 path->nodes[level] = split;
5c680ed6
CM
2253 path->slots[level + 1] += 1;
2254 } else {
925baedd 2255 btrfs_tree_unlock(split);
5f39d397 2256 free_extent_buffer(split);
be0e5c09 2257 }
aa5d6bed 2258 return ret;
be0e5c09
CM
2259}
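/*
 * A worked example of the path fixup above: with c_nritems == 10 the split
 * point is mid = (10 + 1) / 2 = 5, so pointers 5..9 move into the new
 * 'split' block. A path that was at slot 7 becomes slot 2 of 'split' and
 * its slot in the parent is bumped by one; a path at slot 3 keeps using 'c'
 * and only the unused sibling is unlocked and freed.
 */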
2260
74123bd7
CM
2261/*
2262 * how many bytes are required to store the items in a leaf. start
2263 * and nr indicate which items in the leaf to check. This totals up the
2264 * space used both by the item structs and the item data
2265 */
5f39d397 2266static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
2267{
2268 int data_len;
5f39d397 2269 int nritems = btrfs_header_nritems(l);
d4dbff95 2270 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
2271
2272 if (!nr)
2273 return 0;
5f39d397
CM
2274 data_len = btrfs_item_end_nr(l, start);
2275 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 2276 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 2277 WARN_ON(data_len < 0);
be0e5c09
CM
2278 return data_len;
2279}
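/*
 * A worked example of the accounting above: for start == 2 and nr == 3 in a
 * leaf holding at least five items, end is 4 and the total is
 *
 *	btrfs_item_end_nr(l, 2) - btrfs_item_offset_nr(l, 4)
 *		+ 3 * sizeof(struct btrfs_item)
 *
 * i.e. the data bytes spanned by items 2..4 (item data is packed from the
 * end of the block downward, so item 2 ends at the highest offset) plus the
 * three item headers themselves.
 */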
2280
d4dbff95
CM
2281/*
2282 * The space between the end of the leaf items and
2283 * the start of the leaf data. IOW, how much room
2284 * the leaf has left for both items and data
2285 */
d397712b 2286noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 2287 struct extent_buffer *leaf)
d4dbff95 2288{
5f39d397
CM
2289 int nritems = btrfs_header_nritems(leaf);
2290 int ret;
2291 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2292 if (ret < 0) {
d397712b
CM
2293 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2294 "used %d nritems %d\n",
ae2f5411 2295 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
2296 leaf_space_used(leaf, 0, nritems), nritems);
2297 }
2298 return ret;
d4dbff95
CM
2299}
2300
44871b1b
CM
2301static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2302 struct btrfs_root *root,
2303 struct btrfs_path *path,
2304 int data_size, int empty,
2305 struct extent_buffer *right,
2306 int free_space, u32 left_nritems)
00ec4c51 2307{
5f39d397 2308 struct extent_buffer *left = path->nodes[0];
44871b1b 2309 struct extent_buffer *upper = path->nodes[1];
5f39d397 2310 struct btrfs_disk_key disk_key;
00ec4c51 2311 int slot;
34a38218 2312 u32 i;
00ec4c51
CM
2313 int push_space = 0;
2314 int push_items = 0;
0783fcfc 2315 struct btrfs_item *item;
34a38218 2316 u32 nr;
7518a238 2317 u32 right_nritems;
5f39d397 2318 u32 data_end;
db94535d 2319 u32 this_item_size;
00ec4c51 2320
34a38218
CM
2321 if (empty)
2322 nr = 0;
2323 else
2324 nr = 1;
2325
31840ae1 2326 if (path->slots[0] >= left_nritems)
87b29b20 2327 push_space += data_size;
31840ae1 2328
44871b1b 2329 slot = path->slots[1];
34a38218
CM
2330 i = left_nritems - 1;
2331 while (i >= nr) {
5f39d397 2332 item = btrfs_item_nr(left, i);
db94535d 2333
31840ae1
ZY
2334 if (!empty && push_items > 0) {
2335 if (path->slots[0] > i)
2336 break;
2337 if (path->slots[0] == i) {
2338 int space = btrfs_leaf_free_space(root, left);
2339 if (space + push_space * 2 > free_space)
2340 break;
2341 }
2342 }
2343
00ec4c51 2344 if (path->slots[0] == i)
87b29b20 2345 push_space += data_size;
db94535d
CM
2346
2347 if (!left->map_token) {
2348 map_extent_buffer(left, (unsigned long)item,
2349 sizeof(struct btrfs_item),
2350 &left->map_token, &left->kaddr,
2351 &left->map_start, &left->map_len,
2352 KM_USER1);
2353 }
2354
2355 this_item_size = btrfs_item_size(left, item);
2356 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 2357 break;
31840ae1 2358
00ec4c51 2359 push_items++;
db94535d 2360 push_space += this_item_size + sizeof(*item);
34a38218
CM
2361 if (i == 0)
2362 break;
2363 i--;
db94535d
CM
2364 }
2365 if (left->map_token) {
2366 unmap_extent_buffer(left, left->map_token, KM_USER1);
2367 left->map_token = NULL;
00ec4c51 2368 }
5f39d397 2369
925baedd
CM
2370 if (push_items == 0)
2371 goto out_unlock;
5f39d397 2372
34a38218 2373 if (!empty && push_items == left_nritems)
a429e513 2374 WARN_ON(1);
5f39d397 2375
00ec4c51 2376 /* push left to right */
5f39d397 2377 right_nritems = btrfs_header_nritems(right);
34a38218 2378
5f39d397 2379 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 2380 push_space -= leaf_data_end(root, left);
5f39d397 2381
00ec4c51 2382 /* make room in the right data area */
5f39d397
CM
2383 data_end = leaf_data_end(root, right);
2384 memmove_extent_buffer(right,
2385 btrfs_leaf_data(right) + data_end - push_space,
2386 btrfs_leaf_data(right) + data_end,
2387 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2388
00ec4c51 2389 /* copy from the left data area */
5f39d397 2390 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
2391 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2392 btrfs_leaf_data(left) + leaf_data_end(root, left),
2393 push_space);
5f39d397
CM
2394
2395 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2396 btrfs_item_nr_offset(0),
2397 right_nritems * sizeof(struct btrfs_item));
2398
00ec4c51 2399 /* copy the items from left to right */
5f39d397
CM
2400 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2401 btrfs_item_nr_offset(left_nritems - push_items),
2402 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
2403
2404 /* update the item pointers */
7518a238 2405 right_nritems += push_items;
5f39d397 2406 btrfs_set_header_nritems(right, right_nritems);
123abc88 2407 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 2408 for (i = 0; i < right_nritems; i++) {
5f39d397 2409 item = btrfs_item_nr(right, i);
db94535d
CM
2410 if (!right->map_token) {
2411 map_extent_buffer(right, (unsigned long)item,
2412 sizeof(struct btrfs_item),
2413 &right->map_token, &right->kaddr,
2414 &right->map_start, &right->map_len,
2415 KM_USER1);
2416 }
2417 push_space -= btrfs_item_size(right, item);
2418 btrfs_set_item_offset(right, item, push_space);
2419 }
2420
2421 if (right->map_token) {
2422 unmap_extent_buffer(right, right->map_token, KM_USER1);
2423 right->map_token = NULL;
00ec4c51 2424 }
7518a238 2425 left_nritems -= push_items;
5f39d397 2426 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 2427
34a38218
CM
2428 if (left_nritems)
2429 btrfs_mark_buffer_dirty(left);
5f39d397 2430 btrfs_mark_buffer_dirty(right);
a429e513 2431
5f39d397
CM
2432 btrfs_item_key(right, &disk_key, 0);
2433 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 2434 btrfs_mark_buffer_dirty(upper);
02217ed2 2435
00ec4c51 2436 /* then fixup the leaf pointer in the path */
7518a238
CM
2437 if (path->slots[0] >= left_nritems) {
2438 path->slots[0] -= left_nritems;
925baedd
CM
2439 if (btrfs_header_nritems(path->nodes[0]) == 0)
2440 clean_tree_block(trans, root, path->nodes[0]);
2441 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2442 free_extent_buffer(path->nodes[0]);
2443 path->nodes[0] = right;
00ec4c51
CM
2444 path->slots[1] += 1;
2445 } else {
925baedd 2446 btrfs_tree_unlock(right);
5f39d397 2447 free_extent_buffer(right);
00ec4c51
CM
2448 }
2449 return 0;
925baedd
CM
2450
2451out_unlock:
2452 btrfs_tree_unlock(right);
2453 free_extent_buffer(right);
2454 return 1;
00ec4c51 2455}
925baedd 2456
44871b1b
CM
2457/*
2458 * push some data in the path leaf to the right, trying to free up at
2459 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2460 *
2461 * returns 1 if the push failed because the other node didn't have enough
2462 * room, 0 if everything worked out and < 0 if there were major errors.
2463 */
2464static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2465 *root, struct btrfs_path *path, int data_size,
2466 int empty)
2467{
2468 struct extent_buffer *left = path->nodes[0];
2469 struct extent_buffer *right;
2470 struct extent_buffer *upper;
2471 int slot;
2472 int free_space;
2473 u32 left_nritems;
2474 int ret;
2475
2476 if (!path->nodes[1])
2477 return 1;
2478
2479 slot = path->slots[1];
2480 upper = path->nodes[1];
2481 if (slot >= btrfs_header_nritems(upper) - 1)
2482 return 1;
2483
2484 btrfs_assert_tree_locked(path->nodes[1]);
2485
2486 right = read_node_slot(root, upper, slot + 1);
2487 btrfs_tree_lock(right);
2488 btrfs_set_lock_blocking(right);
2489
2490 free_space = btrfs_leaf_free_space(root, right);
2491 if (free_space < data_size)
2492 goto out_unlock;
2493
2494 /* cow and double check */
2495 ret = btrfs_cow_block(trans, root, right, upper,
2496 slot + 1, &right);
2497 if (ret)
2498 goto out_unlock;
2499
2500 free_space = btrfs_leaf_free_space(root, right);
2501 if (free_space < data_size)
2502 goto out_unlock;
2503
2504 left_nritems = btrfs_header_nritems(left);
2505 if (left_nritems == 0)
2506 goto out_unlock;
2507
2508 return __push_leaf_right(trans, root, path, data_size, empty,
2509 right, free_space, left_nritems);
2510out_unlock:
2511 btrfs_tree_unlock(right);
2512 free_extent_buffer(right);
2513 return 1;
2514}
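/*
 * A sketch of the return convention shared by push_leaf_right() above and
 * push_leaf_left() below: 0 means items were moved and the path was fixed
 * up so path->nodes[0] and path->slots[0] still name the caller's item,
 * possibly in the sibling leaf now; 1 means nothing could move and the
 * caller falls back to splitting; < 0 is a hard error. split_leaf() relies
 * on exactly this when it tries both directions before allocating a new
 * leaf.
 */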
2515
74123bd7
CM
2516/*
2517 * push some data in the path leaf to the left, trying to free up at
2518 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2519 */
44871b1b
CM
2520static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2521 struct btrfs_root *root,
2522 struct btrfs_path *path, int data_size,
2523 int empty, struct extent_buffer *left,
2524 int free_space, int right_nritems)
be0e5c09 2525{
5f39d397
CM
2526 struct btrfs_disk_key disk_key;
2527 struct extent_buffer *right = path->nodes[0];
be0e5c09
CM
2528 int slot;
2529 int i;
be0e5c09
CM
2530 int push_space = 0;
2531 int push_items = 0;
0783fcfc 2532 struct btrfs_item *item;
7518a238 2533 u32 old_left_nritems;
34a38218 2534 u32 nr;
aa5d6bed
CM
2535 int ret = 0;
2536 int wret;
db94535d
CM
2537 u32 this_item_size;
2538 u32 old_left_item_size;
be0e5c09
CM
2539
2540 slot = path->slots[1];
02217ed2 2541
34a38218
CM
2542 if (empty)
2543 nr = right_nritems;
2544 else
2545 nr = right_nritems - 1;
2546
2547 for (i = 0; i < nr; i++) {
5f39d397 2548 item = btrfs_item_nr(right, i);
db94535d
CM
2549 if (!right->map_token) {
2550 map_extent_buffer(right, (unsigned long)item,
2551 sizeof(struct btrfs_item),
2552 &right->map_token, &right->kaddr,
2553 &right->map_start, &right->map_len,
2554 KM_USER1);
2555 }
2556
31840ae1
ZY
2557 if (!empty && push_items > 0) {
2558 if (path->slots[0] < i)
2559 break;
2560 if (path->slots[0] == i) {
2561 int space = btrfs_leaf_free_space(root, right);
2562 if (space + push_space * 2 > free_space)
2563 break;
2564 }
2565 }
2566
be0e5c09 2567 if (path->slots[0] == i)
87b29b20 2568 push_space += data_size;
db94535d
CM
2569
2570 this_item_size = btrfs_item_size(right, item);
2571 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 2572 break;
db94535d 2573
be0e5c09 2574 push_items++;
db94535d
CM
2575 push_space += this_item_size + sizeof(*item);
2576 }
2577
2578 if (right->map_token) {
2579 unmap_extent_buffer(right, right->map_token, KM_USER1);
2580 right->map_token = NULL;
be0e5c09 2581 }
db94535d 2582
be0e5c09 2583 if (push_items == 0) {
925baedd
CM
2584 ret = 1;
2585 goto out;
be0e5c09 2586 }
34a38218 2587 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 2588 WARN_ON(1);
5f39d397 2589
be0e5c09 2590 /* push data from right to left */
5f39d397
CM
2591 copy_extent_buffer(left, right,
2592 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2593 btrfs_item_nr_offset(0),
2594 push_items * sizeof(struct btrfs_item));
2595
123abc88 2596 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 2597 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
2598
2599 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
2600 leaf_data_end(root, left) - push_space,
2601 btrfs_leaf_data(right) +
5f39d397 2602 btrfs_item_offset_nr(right, push_items - 1),
d6025579 2603 push_space);
5f39d397 2604 old_left_nritems = btrfs_header_nritems(left);
87b29b20 2605 BUG_ON(old_left_nritems <= 0);
eb60ceac 2606
db94535d 2607 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 2608 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 2609 u32 ioff;
db94535d 2610
5f39d397 2611 item = btrfs_item_nr(left, i);
db94535d
CM
2612 if (!left->map_token) {
2613 map_extent_buffer(left, (unsigned long)item,
2614 sizeof(struct btrfs_item),
2615 &left->map_token, &left->kaddr,
2616 &left->map_start, &left->map_len,
2617 KM_USER1);
2618 }
2619
5f39d397
CM
2620 ioff = btrfs_item_offset(left, item);
2621 btrfs_set_item_offset(left, item,
db94535d 2622 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
be0e5c09 2623 }
5f39d397 2624 btrfs_set_header_nritems(left, old_left_nritems + push_items);
db94535d
CM
2625 if (left->map_token) {
2626 unmap_extent_buffer(left, left->map_token, KM_USER1);
2627 left->map_token = NULL;
2628 }
be0e5c09
CM
2629
2630 /* fixup right node */
34a38218 2631 if (push_items > right_nritems) {
d397712b
CM
2632 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2633 right_nritems);
34a38218
CM
2634 WARN_ON(1);
2635 }
2636
2637 if (push_items < right_nritems) {
2638 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2639 leaf_data_end(root, right);
2640 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2641 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2642 btrfs_leaf_data(right) +
2643 leaf_data_end(root, right), push_space);
2644
2645 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
2646 btrfs_item_nr_offset(push_items),
2647 (btrfs_header_nritems(right) - push_items) *
2648 sizeof(struct btrfs_item));
34a38218 2649 }
eef1c494
Y
2650 right_nritems -= push_items;
2651 btrfs_set_header_nritems(right, right_nritems);
123abc88 2652 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
2653 for (i = 0; i < right_nritems; i++) {
2654 item = btrfs_item_nr(right, i);
db94535d
CM
2655
2656 if (!right->map_token) {
2657 map_extent_buffer(right, (unsigned long)item,
2658 sizeof(struct btrfs_item),
2659 &right->map_token, &right->kaddr,
2660 &right->map_start, &right->map_len,
2661 KM_USER1);
2662 }
2663
2664 push_space = push_space - btrfs_item_size(right, item);
2665 btrfs_set_item_offset(right, item, push_space);
2666 }
2667 if (right->map_token) {
2668 unmap_extent_buffer(right, right->map_token, KM_USER1);
2669 right->map_token = NULL;
be0e5c09 2670 }
eb60ceac 2671
5f39d397 2672 btrfs_mark_buffer_dirty(left);
34a38218
CM
2673 if (right_nritems)
2674 btrfs_mark_buffer_dirty(right);
098f59c2 2675
5f39d397
CM
2676 btrfs_item_key(right, &disk_key, 0);
2677 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
aa5d6bed
CM
2678 if (wret)
2679 ret = wret;
be0e5c09
CM
2680
2681 /* then fixup the leaf pointer in the path */
2682 if (path->slots[0] < push_items) {
2683 path->slots[0] += old_left_nritems;
925baedd
CM
2684 if (btrfs_header_nritems(path->nodes[0]) == 0)
2685 clean_tree_block(trans, root, path->nodes[0]);
2686 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2687 free_extent_buffer(path->nodes[0]);
2688 path->nodes[0] = left;
be0e5c09
CM
2689 path->slots[1] -= 1;
2690 } else {
925baedd 2691 btrfs_tree_unlock(left);
5f39d397 2692 free_extent_buffer(left);
be0e5c09
CM
2693 path->slots[0] -= push_items;
2694 }
eb60ceac 2695 BUG_ON(path->slots[0] < 0);
aa5d6bed 2696 return ret;
925baedd
CM
2697out:
2698 btrfs_tree_unlock(left);
2699 free_extent_buffer(left);
2700 return ret;
be0e5c09
CM
2701}
2702
44871b1b
CM
2703/*
2704 * push some data in the path leaf to the left, trying to free up at
2705 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2706 */
2707static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2708 *root, struct btrfs_path *path, int data_size,
2709 int empty)
2710{
2711 struct extent_buffer *right = path->nodes[0];
2712 struct extent_buffer *left;
2713 int slot;
2714 int free_space;
2715 u32 right_nritems;
2716 int ret = 0;
2717
2718 slot = path->slots[1];
2719 if (slot == 0)
2720 return 1;
2721 if (!path->nodes[1])
2722 return 1;
2723
2724 right_nritems = btrfs_header_nritems(right);
2725 if (right_nritems == 0)
2726 return 1;
2727
2728 btrfs_assert_tree_locked(path->nodes[1]);
2729
2730 left = read_node_slot(root, path->nodes[1], slot - 1);
2731 btrfs_tree_lock(left);
2732 btrfs_set_lock_blocking(left);
2733
2734 free_space = btrfs_leaf_free_space(root, left);
2735 if (free_space < data_size) {
2736 ret = 1;
2737 goto out;
2738 }
2739
2740 /* cow and double check */
2741 ret = btrfs_cow_block(trans, root, left,
2742 path->nodes[1], slot - 1, &left);
2743 if (ret) {
2744 /* we hit -ENOSPC, but it isn't fatal here */
2745 ret = 1;
2746 goto out;
2747 }
2748
2749 free_space = btrfs_leaf_free_space(root, left);
2750 if (free_space < data_size) {
2751 ret = 1;
2752 goto out;
2753 }
2754
2755 return __push_leaf_left(trans, root, path, data_size,
2756 empty, left, free_space, right_nritems);
2757out:
2758 btrfs_tree_unlock(left);
2759 free_extent_buffer(left);
2760 return ret;
2761}
2762
2763/*
2764 * split the path's leaf in two, making sure there is at least data_size
2765 * available for the resulting leaf level of the path.
2766 *
2767 * returns 0 if all went well and < 0 on failure.
2768 */
2769static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2770 struct btrfs_root *root,
2771 struct btrfs_path *path,
2772 struct extent_buffer *l,
2773 struct extent_buffer *right,
2774 int slot, int mid, int nritems)
2775{
2776 int data_copy_size;
2777 int rt_data_off;
2778 int i;
2779 int ret = 0;
2780 int wret;
2781 struct btrfs_disk_key disk_key;
2782
2783 nritems = nritems - mid;
2784 btrfs_set_header_nritems(right, nritems);
2785 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2786
2787 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2788 btrfs_item_nr_offset(mid),
2789 nritems * sizeof(struct btrfs_item));
2790
2791 copy_extent_buffer(right, l,
2792 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2793 data_copy_size, btrfs_leaf_data(l) +
2794 leaf_data_end(root, l), data_copy_size);
2795
2796 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2797 btrfs_item_end_nr(l, mid);
2798
2799 for (i = 0; i < nritems; i++) {
2800 struct btrfs_item *item = btrfs_item_nr(right, i);
2801 u32 ioff;
2802
2803 if (!right->map_token) {
2804 map_extent_buffer(right, (unsigned long)item,
2805 sizeof(struct btrfs_item),
2806 &right->map_token, &right->kaddr,
2807 &right->map_start, &right->map_len,
2808 KM_USER1);
2809 }
2810
2811 ioff = btrfs_item_offset(right, item);
2812 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2813 }
2814
2815 if (right->map_token) {
2816 unmap_extent_buffer(right, right->map_token, KM_USER1);
2817 right->map_token = NULL;
2818 }
2819
2820 btrfs_set_header_nritems(l, mid);
2821 ret = 0;
2822 btrfs_item_key(right, &disk_key, 0);
2823 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2824 path->slots[1] + 1, 1);
2825 if (wret)
2826 ret = wret;
2827
2828 btrfs_mark_buffer_dirty(right);
2829 btrfs_mark_buffer_dirty(l);
2830 BUG_ON(path->slots[0] != slot);
2831
44871b1b
CM
2832 if (mid <= slot) {
2833 btrfs_tree_unlock(path->nodes[0]);
2834 free_extent_buffer(path->nodes[0]);
2835 path->nodes[0] = right;
2836 path->slots[0] -= mid;
2837 path->slots[1] += 1;
2838 } else {
2839 btrfs_tree_unlock(right);
2840 free_extent_buffer(right);
2841 }
2842
2843 BUG_ON(path->slots[0] < 0);
2844
2845 return ret;
2846}
2847
74123bd7
CM
2848/*
2849 * split the path's leaf in two, making sure there is at least data_size
2850 * available for the resulting leaf level of the path.
aa5d6bed
CM
2851 *
2852 * returns 0 if all went well and < 0 on failure.
74123bd7 2853 */
e02119d5
CM
2854static noinline int split_leaf(struct btrfs_trans_handle *trans,
2855 struct btrfs_root *root,
2856 struct btrfs_key *ins_key,
2857 struct btrfs_path *path, int data_size,
2858 int extend)
be0e5c09 2859{
5d4f98a2 2860 struct btrfs_disk_key disk_key;
5f39d397 2861 struct extent_buffer *l;
7518a238 2862 u32 nritems;
eb60ceac
CM
2863 int mid;
2864 int slot;
5f39d397 2865 struct extent_buffer *right;
d4dbff95 2866 int ret = 0;
aa5d6bed 2867 int wret;
5d4f98a2 2868 int split;
cc0c5538 2869 int num_doubles = 0;
aa5d6bed 2870
40689478 2871 /* first try to make some room by pushing left and right */
a4b6e07d
CM
2872 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY &&
2873 !trans->transaction->delayed_refs.flushing) {
34a38218 2874 wret = push_leaf_right(trans, root, path, data_size, 0);
d397712b 2875 if (wret < 0)
eaee50e8 2876 return wret;
3685f791 2877 if (wret) {
34a38218 2878 wret = push_leaf_left(trans, root, path, data_size, 0);
3685f791
CM
2879 if (wret < 0)
2880 return wret;
2881 }
2882 l = path->nodes[0];
aa5d6bed 2883
3685f791 2884 /* did the pushes work? */
87b29b20 2885 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 2886 return 0;
3326d1b0 2887 }
aa5d6bed 2888
5c680ed6 2889 if (!path->nodes[1]) {
e089f05c 2890 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
2891 if (ret)
2892 return ret;
2893 }
cc0c5538 2894again:
5d4f98a2 2895 split = 1;
cc0c5538 2896 l = path->nodes[0];
eb60ceac 2897 slot = path->slots[0];
5f39d397 2898 nritems = btrfs_header_nritems(l);
d397712b 2899 mid = (nritems + 1) / 2;
54aa1f4d 2900
5d4f98a2
YZ
2901 if (mid <= slot) {
2902 if (nritems == 1 ||
2903 leaf_space_used(l, mid, nritems - mid) + data_size >
2904 BTRFS_LEAF_DATA_SIZE(root)) {
2905 if (slot >= nritems) {
2906 split = 0;
2907 } else {
2908 mid = slot;
2909 if (mid != nritems &&
2910 leaf_space_used(l, mid, nritems - mid) +
2911 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2912 split = 2;
2913 }
2914 }
2915 }
2916 } else {
2917 if (leaf_space_used(l, 0, mid) + data_size >
2918 BTRFS_LEAF_DATA_SIZE(root)) {
2919 if (!extend && data_size && slot == 0) {
2920 split = 0;
2921 } else if ((extend || !data_size) && slot == 0) {
2922 mid = 1;
2923 } else {
2924 mid = slot;
2925 if (mid != nritems &&
2926 leaf_space_used(l, mid, nritems - mid) +
2927 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
 2928 split = 2;
2929 }
2930 }
2931 }
2932 }
2933
2934 if (split == 0)
2935 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2936 else
2937 btrfs_item_key(l, &disk_key, mid);
2938
2939 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 2940 root->root_key.objectid,
5d4f98a2 2941 &disk_key, 0, l->start, 0);
cea9e445
CM
2942 if (IS_ERR(right)) {
2943 BUG_ON(1);
5f39d397 2944 return PTR_ERR(right);
cea9e445 2945 }
5f39d397
CM
2946
2947 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 2948 btrfs_set_header_bytenr(right, right->start);
5f39d397 2949 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 2950 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2951 btrfs_set_header_owner(right, root->root_key.objectid);
2952 btrfs_set_header_level(right, 0);
2953 write_extent_buffer(right, root->fs_info->fsid,
2954 (unsigned long)btrfs_header_fsid(right),
2955 BTRFS_FSID_SIZE);
e17cade2
CM
2956
2957 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2958 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2959 BTRFS_UUID_SIZE);
44871b1b 2960
5d4f98a2
YZ
2961 if (split == 0) {
2962 if (mid <= slot) {
2963 btrfs_set_header_nritems(right, 0);
2964 wret = insert_ptr(trans, root, path,
2965 &disk_key, right->start,
2966 path->slots[1] + 1, 1);
2967 if (wret)
2968 ret = wret;
925baedd 2969
5d4f98a2
YZ
2970 btrfs_tree_unlock(path->nodes[0]);
2971 free_extent_buffer(path->nodes[0]);
2972 path->nodes[0] = right;
2973 path->slots[0] = 0;
2974 path->slots[1] += 1;
2975 } else {
2976 btrfs_set_header_nritems(right, 0);
2977 wret = insert_ptr(trans, root, path,
2978 &disk_key,
2979 right->start,
2980 path->slots[1], 1);
2981 if (wret)
2982 ret = wret;
2983 btrfs_tree_unlock(path->nodes[0]);
2984 free_extent_buffer(path->nodes[0]);
2985 path->nodes[0] = right;
2986 path->slots[0] = 0;
2987 if (path->slots[1] == 0) {
2988 wret = fixup_low_keys(trans, root,
2989 path, &disk_key, 1);
d4dbff95
CM
2990 if (wret)
2991 ret = wret;
5ee78ac7 2992 }
d4dbff95 2993 }
5d4f98a2
YZ
2994 btrfs_mark_buffer_dirty(right);
2995 return ret;
d4dbff95 2996 }
74123bd7 2997
44871b1b 2998 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1
ZY
2999 BUG_ON(ret);
3000
5d4f98a2 3001 if (split == 2) {
cc0c5538
CM
3002 BUG_ON(num_doubles != 0);
3003 num_doubles++;
3004 goto again;
a429e513 3005 }
44871b1b 3006
be0e5c09
CM
3007 return ret;
3008}
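/*
 * A summary sketch of the 'split' variable used above, as implied by the
 * branches: split == 1 is the common case and moves items [mid, nritems)
 * into the new right leaf; split == 0 links in a brand new empty leaf and
 * points the path at it, because the item being inserted cannot share a
 * leaf with the existing data; split == 2 means one split still does not
 * free data_size bytes, so after copy_for_split() the whole procedure runs
 * once more through the 'again' label, with num_doubles making sure that
 * only happens once.
 */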
3009
459931ec
CM
3010/*
3011 * This function splits a single item into two items,
3012 * giving 'new_key' to the new item and splitting the
3013 * old one at split_offset (from the start of the item).
3014 *
3015 * The path may be released by this operation. After
3016 * the split, the path is pointing to the old item. The
3017 * new item is going to be in the same node as the old one.
3018 *
 3019 * Note, the item being split must be small enough to live alone on
3020 * a tree block with room for one extra struct btrfs_item
3021 *
3022 * This allows us to split the item in place, keeping a lock on the
3023 * leaf the entire time.
3024 */
3025int btrfs_split_item(struct btrfs_trans_handle *trans,
3026 struct btrfs_root *root,
3027 struct btrfs_path *path,
3028 struct btrfs_key *new_key,
3029 unsigned long split_offset)
3030{
3031 u32 item_size;
3032 struct extent_buffer *leaf;
3033 struct btrfs_key orig_key;
3034 struct btrfs_item *item;
3035 struct btrfs_item *new_item;
3036 int ret = 0;
3037 int slot;
3038 u32 nritems;
3039 u32 orig_offset;
3040 struct btrfs_disk_key disk_key;
3041 char *buf;
3042
3043 leaf = path->nodes[0];
3044 btrfs_item_key_to_cpu(leaf, &orig_key, path->slots[0]);
3045 if (btrfs_leaf_free_space(root, leaf) >= sizeof(struct btrfs_item))
3046 goto split;
3047
3048 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3049 btrfs_release_path(root, path);
3050
3051 path->search_for_split = 1;
3052 path->keep_locks = 1;
3053
3054 ret = btrfs_search_slot(trans, root, &orig_key, path, 0, 1);
3055 path->search_for_split = 0;
3056
3057 /* if our item isn't there or got smaller, return now */
3058 if (ret != 0 || item_size != btrfs_item_size_nr(path->nodes[0],
3059 path->slots[0])) {
3060 path->keep_locks = 0;
3061 return -EAGAIN;
3062 }
3063
b9473439 3064 btrfs_set_path_blocking(path);
87b29b20
YZ
3065 ret = split_leaf(trans, root, &orig_key, path,
3066 sizeof(struct btrfs_item), 1);
459931ec
CM
3067 path->keep_locks = 0;
3068 BUG_ON(ret);
3069
b9473439
CM
3070 btrfs_unlock_up_safe(path, 1);
3071 leaf = path->nodes[0];
3072 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3073
3074split:
b4ce94de
CM
3075 /*
3076 * make sure any changes to the path from split_leaf leave it
3077 * in a blocking state
3078 */
3079 btrfs_set_path_blocking(path);
3080
459931ec
CM
3081 item = btrfs_item_nr(leaf, path->slots[0]);
3082 orig_offset = btrfs_item_offset(leaf, item);
3083 item_size = btrfs_item_size(leaf, item);
3084
459931ec
CM
 3085 buf = kmalloc(item_size, GFP_NOFS);
 if (!buf)
 return -ENOMEM;
3086 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3087 path->slots[0]), item_size);
3088 slot = path->slots[0] + 1;
3089 leaf = path->nodes[0];
3090
3091 nritems = btrfs_header_nritems(leaf);
3092
3093 if (slot != nritems) {
3094 /* shift the items */
3095 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3096 btrfs_item_nr_offset(slot),
3097 (nritems - slot) * sizeof(struct btrfs_item));
3098
3099 }
3100
3101 btrfs_cpu_key_to_disk(&disk_key, new_key);
3102 btrfs_set_item_key(leaf, &disk_key, slot);
3103
3104 new_item = btrfs_item_nr(leaf, slot);
3105
3106 btrfs_set_item_offset(leaf, new_item, orig_offset);
3107 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3108
3109 btrfs_set_item_offset(leaf, item,
3110 orig_offset + item_size - split_offset);
3111 btrfs_set_item_size(leaf, item, split_offset);
3112
3113 btrfs_set_header_nritems(leaf, nritems + 1);
3114
3115 /* write the data for the start of the original item */
3116 write_extent_buffer(leaf, buf,
3117 btrfs_item_ptr_offset(leaf, path->slots[0]),
3118 split_offset);
3119
3120 /* write the data for the new item */
3121 write_extent_buffer(leaf, buf + split_offset,
3122 btrfs_item_ptr_offset(leaf, slot),
3123 item_size - split_offset);
3124 btrfs_mark_buffer_dirty(leaf);
3125
3126 ret = 0;
3127 if (btrfs_leaf_free_space(root, leaf) < 0) {
3128 btrfs_print_leaf(root, leaf);
3129 BUG();
3130 }
3131 kfree(buf);
3132 return ret;
3133}
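/*
 * A layout sketch for the split above: an item of size 12 split at
 * split_offset == 4 ends up as two adjacent items in the same leaf. The
 * original key keeps the first 4 bytes of the data, and new_key, placed in
 * the following slot, receives the remaining 8. Both halves are rewritten
 * from the temporary buffer, so the leaf never has to be unlocked once the
 * split: label is reached.
 */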
3134
d352ac68
CM
3135/*
3136 * make the item pointed to by the path smaller. new_size indicates
3137 * how small to make it, and from_end tells us if we just chop bytes
3138 * off the end of the item or if we shift the item to chop bytes off
3139 * the front.
3140 */
b18c6685
CM
3141int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3142 struct btrfs_root *root,
3143 struct btrfs_path *path,
179e29e4 3144 u32 new_size, int from_end)
b18c6685
CM
3145{
3146 int ret = 0;
3147 int slot;
3148 int slot_orig;
5f39d397
CM
3149 struct extent_buffer *leaf;
3150 struct btrfs_item *item;
b18c6685
CM
3151 u32 nritems;
3152 unsigned int data_end;
3153 unsigned int old_data_start;
3154 unsigned int old_size;
3155 unsigned int size_diff;
3156 int i;
3157
3158 slot_orig = path->slots[0];
5f39d397 3159 leaf = path->nodes[0];
179e29e4
CM
3160 slot = path->slots[0];
3161
3162 old_size = btrfs_item_size_nr(leaf, slot);
3163 if (old_size == new_size)
3164 return 0;
b18c6685 3165
5f39d397 3166 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
3167 data_end = leaf_data_end(root, leaf);
3168
5f39d397 3169 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 3170
b18c6685
CM
3171 size_diff = old_size - new_size;
3172
3173 BUG_ON(slot < 0);
3174 BUG_ON(slot >= nritems);
3175
3176 /*
3177 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3178 */
3179 /* first correct the data pointers */
3180 for (i = slot; i < nritems; i++) {
5f39d397
CM
3181 u32 ioff;
3182 item = btrfs_item_nr(leaf, i);
db94535d
CM
3183
3184 if (!leaf->map_token) {
3185 map_extent_buffer(leaf, (unsigned long)item,
3186 sizeof(struct btrfs_item),
3187 &leaf->map_token, &leaf->kaddr,
3188 &leaf->map_start, &leaf->map_len,
3189 KM_USER1);
3190 }
3191
5f39d397
CM
3192 ioff = btrfs_item_offset(leaf, item);
3193 btrfs_set_item_offset(leaf, item, ioff + size_diff);
b18c6685 3194 }
db94535d
CM
3195
3196 if (leaf->map_token) {
3197 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3198 leaf->map_token = NULL;
3199 }
3200
b18c6685 3201 /* shift the data */
179e29e4
CM
3202 if (from_end) {
3203 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3204 data_end + size_diff, btrfs_leaf_data(leaf) +
3205 data_end, old_data_start + new_size - data_end);
3206 } else {
3207 struct btrfs_disk_key disk_key;
3208 u64 offset;
3209
3210 btrfs_item_key(leaf, &disk_key, slot);
3211
3212 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3213 unsigned long ptr;
3214 struct btrfs_file_extent_item *fi;
3215
3216 fi = btrfs_item_ptr(leaf, slot,
3217 struct btrfs_file_extent_item);
3218 fi = (struct btrfs_file_extent_item *)(
3219 (unsigned long)fi - size_diff);
3220
3221 if (btrfs_file_extent_type(leaf, fi) ==
3222 BTRFS_FILE_EXTENT_INLINE) {
3223 ptr = btrfs_item_ptr_offset(leaf, slot);
3224 memmove_extent_buffer(leaf, ptr,
d397712b
CM
3225 (unsigned long)fi,
3226 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
3227 disk_bytenr));
3228 }
3229 }
3230
3231 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3232 data_end + size_diff, btrfs_leaf_data(leaf) +
3233 data_end, old_data_start - data_end);
3234
3235 offset = btrfs_disk_key_offset(&disk_key);
3236 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3237 btrfs_set_item_key(leaf, &disk_key, slot);
3238 if (slot == 0)
3239 fixup_low_keys(trans, root, path, &disk_key, 1);
3240 }
5f39d397
CM
3241
3242 item = btrfs_item_nr(leaf, slot);
3243 btrfs_set_item_size(leaf, item, new_size);
3244 btrfs_mark_buffer_dirty(leaf);
b18c6685
CM
3245
3246 ret = 0;
5f39d397
CM
3247 if (btrfs_leaf_free_space(root, leaf) < 0) {
3248 btrfs_print_leaf(root, leaf);
b18c6685 3249 BUG();
5f39d397 3250 }
b18c6685
CM
3251 return ret;
3252}
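/*
 * A sketch of the two truncation modes above, with illustrative sizes: for
 * old_size == 100 and new_size == 60, from_end != 0 simply drops the last
 * 40 bytes of the item data. With from_end == 0 the first 40 bytes are cut
 * instead: the data of the following items slides over to close the gap,
 * the item keeps its last 60 bytes, and the key offset is bumped by the
 * same 40 bytes (the extra shuffle above keeps the header of an inline file
 * extent in front of its data).
 */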
3253
d352ac68
CM
3254/*
 3255 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
3256 */
5f39d397
CM
3257int btrfs_extend_item(struct btrfs_trans_handle *trans,
3258 struct btrfs_root *root, struct btrfs_path *path,
3259 u32 data_size)
6567e837
CM
3260{
3261 int ret = 0;
3262 int slot;
3263 int slot_orig;
5f39d397
CM
3264 struct extent_buffer *leaf;
3265 struct btrfs_item *item;
6567e837
CM
3266 u32 nritems;
3267 unsigned int data_end;
3268 unsigned int old_data;
3269 unsigned int old_size;
3270 int i;
3271
3272 slot_orig = path->slots[0];
5f39d397 3273 leaf = path->nodes[0];
6567e837 3274
5f39d397 3275 nritems = btrfs_header_nritems(leaf);
6567e837
CM
3276 data_end = leaf_data_end(root, leaf);
3277
5f39d397
CM
3278 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3279 btrfs_print_leaf(root, leaf);
6567e837 3280 BUG();
5f39d397 3281 }
6567e837 3282 slot = path->slots[0];
5f39d397 3283 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
3284
3285 BUG_ON(slot < 0);
3326d1b0
CM
3286 if (slot >= nritems) {
3287 btrfs_print_leaf(root, leaf);
d397712b
CM
3288 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3289 slot, nritems);
3326d1b0
CM
3290 BUG_ON(1);
3291 }
6567e837
CM
3292
3293 /*
3294 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3295 */
3296 /* first correct the data pointers */
3297 for (i = slot; i < nritems; i++) {
5f39d397
CM
3298 u32 ioff;
3299 item = btrfs_item_nr(leaf, i);
db94535d
CM
3300
3301 if (!leaf->map_token) {
3302 map_extent_buffer(leaf, (unsigned long)item,
3303 sizeof(struct btrfs_item),
3304 &leaf->map_token, &leaf->kaddr,
3305 &leaf->map_start, &leaf->map_len,
3306 KM_USER1);
3307 }
5f39d397
CM
3308 ioff = btrfs_item_offset(leaf, item);
3309 btrfs_set_item_offset(leaf, item, ioff - data_size);
6567e837 3310 }
5f39d397 3311
db94535d
CM
3312 if (leaf->map_token) {
3313 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3314 leaf->map_token = NULL;
3315 }
3316
6567e837 3317 /* shift the data */
5f39d397 3318 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
6567e837
CM
3319 data_end - data_size, btrfs_leaf_data(leaf) +
3320 data_end, old_data - data_end);
5f39d397 3321
6567e837 3322 data_end = old_data;
5f39d397
CM
3323 old_size = btrfs_item_size_nr(leaf, slot);
3324 item = btrfs_item_nr(leaf, slot);
3325 btrfs_set_item_size(leaf, item, old_size + data_size);
3326 btrfs_mark_buffer_dirty(leaf);
6567e837
CM
3327
3328 ret = 0;
5f39d397
CM
3329 if (btrfs_leaf_free_space(root, leaf) < 0) {
3330 btrfs_print_leaf(root, leaf);
6567e837 3331 BUG();
5f39d397 3332 }
6567e837
CM
3333 return ret;
3334}
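/*
 * Growth sketch for the helper above: extending an item by data_size slides
 * the data of this item and of every later item data_size bytes toward the
 * item headers, so the newly reserved bytes end up at the tail of the item.
 * Callers that need the room somewhere else inside the item move the
 * payload themselves afterwards.
 */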
3335
f3465ca4
JB
3336/*
3337 * Given a key and some data, insert items into the tree.
3338 * This does all the path init required, making room in the tree if needed.
3339 * Returns the number of keys that were inserted.
3340 */
3341int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3342 struct btrfs_root *root,
3343 struct btrfs_path *path,
3344 struct btrfs_key *cpu_key, u32 *data_size,
3345 int nr)
3346{
3347 struct extent_buffer *leaf;
3348 struct btrfs_item *item;
3349 int ret = 0;
3350 int slot;
f3465ca4
JB
3351 int i;
3352 u32 nritems;
3353 u32 total_data = 0;
3354 u32 total_size = 0;
3355 unsigned int data_end;
3356 struct btrfs_disk_key disk_key;
3357 struct btrfs_key found_key;
3358
87b29b20
YZ
3359 for (i = 0; i < nr; i++) {
3360 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3361 BTRFS_LEAF_DATA_SIZE(root)) {
 3362 nr = i;
 3363 break;
3364 }
f3465ca4 3365 total_data += data_size[i];
87b29b20
YZ
3366 total_size += data_size[i] + sizeof(struct btrfs_item);
3367 }
3368 BUG_ON(nr == 0);
f3465ca4 3369
f3465ca4
JB
3370 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3371 if (ret == 0)
3372 return -EEXIST;
3373 if (ret < 0)
3374 goto out;
3375
f3465ca4
JB
3376 leaf = path->nodes[0];
3377
3378 nritems = btrfs_header_nritems(leaf);
3379 data_end = leaf_data_end(root, leaf);
3380
3381 if (btrfs_leaf_free_space(root, leaf) < total_size) {
 3382 for (i = nr - 1; i >= 0; i--) {
3383 total_data -= data_size[i];
3384 total_size -= data_size[i] + sizeof(struct btrfs_item);
3385 if (total_size < btrfs_leaf_free_space(root, leaf))
3386 break;
3387 }
3388 nr = i;
3389 }
3390
3391 slot = path->slots[0];
3392 BUG_ON(slot < 0);
3393
3394 if (slot != nritems) {
3395 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3396
3397 item = btrfs_item_nr(leaf, slot);
3398 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3399
3400 /* figure out how many keys we can insert in here */
3401 total_data = data_size[0];
3402 for (i = 1; i < nr; i++) {
5d4f98a2 3403 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
f3465ca4
JB
3404 break;
3405 total_data += data_size[i];
3406 }
3407 nr = i;
3408
3409 if (old_data < data_end) {
3410 btrfs_print_leaf(root, leaf);
d397712b 3411 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
f3465ca4
JB
3412 slot, old_data, data_end);
3413 BUG_ON(1);
3414 }
3415 /*
3416 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3417 */
3418 /* first correct the data pointers */
3419 WARN_ON(leaf->map_token);
3420 for (i = slot; i < nritems; i++) {
3421 u32 ioff;
3422
3423 item = btrfs_item_nr(leaf, i);
3424 if (!leaf->map_token) {
3425 map_extent_buffer(leaf, (unsigned long)item,
3426 sizeof(struct btrfs_item),
3427 &leaf->map_token, &leaf->kaddr,
3428 &leaf->map_start, &leaf->map_len,
3429 KM_USER1);
3430 }
3431
3432 ioff = btrfs_item_offset(leaf, item);
3433 btrfs_set_item_offset(leaf, item, ioff - total_data);
3434 }
3435 if (leaf->map_token) {
3436 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3437 leaf->map_token = NULL;
3438 }
3439
3440 /* shift the items */
3441 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3442 btrfs_item_nr_offset(slot),
3443 (nritems - slot) * sizeof(struct btrfs_item));
3444
3445 /* shift the data */
3446 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3447 data_end - total_data, btrfs_leaf_data(leaf) +
3448 data_end, old_data - data_end);
3449 data_end = old_data;
3450 } else {
3451 /*
 3452 * this sucks but it has to be done: if we are inserting at
 3453 * the end of the leaf, only insert 1 of the items, since we
 3454 * have no way of knowing what's on the next leaf and we'd have
3455 * to drop our current locks to figure it out
3456 */
3457 nr = 1;
3458 }
3459
3460 /* setup the item for the new data */
3461 for (i = 0; i < nr; i++) {
3462 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3463 btrfs_set_item_key(leaf, &disk_key, slot + i);
3464 item = btrfs_item_nr(leaf, slot + i);
3465 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3466 data_end -= data_size[i];
3467 btrfs_set_item_size(leaf, item, data_size[i]);
3468 }
3469 btrfs_set_header_nritems(leaf, nritems + nr);
3470 btrfs_mark_buffer_dirty(leaf);
3471
3472 ret = 0;
3473 if (slot == 0) {
3474 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3475 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3476 }
3477
3478 if (btrfs_leaf_free_space(root, leaf) < 0) {
3479 btrfs_print_leaf(root, leaf);
3480 BUG();
3481 }
3482out:
3483 if (!ret)
3484 ret = nr;
3485 return ret;
3486}
3487
74123bd7 3488/*
44871b1b
CM
3489 * this is a helper for btrfs_insert_empty_items, the main goal here is
3490 * to save stack depth by doing the bulk of the work in a function
3491 * that doesn't call btrfs_search_slot
74123bd7 3492 */
44871b1b
CM
3493static noinline_for_stack int
3494setup_items_for_insert(struct btrfs_trans_handle *trans,
3495 struct btrfs_root *root, struct btrfs_path *path,
3496 struct btrfs_key *cpu_key, u32 *data_size,
3497 u32 total_data, u32 total_size, int nr)
be0e5c09 3498{
5f39d397 3499 struct btrfs_item *item;
9c58309d 3500 int i;
7518a238 3501 u32 nritems;
be0e5c09 3502 unsigned int data_end;
e2fa7227 3503 struct btrfs_disk_key disk_key;
44871b1b
CM
3504 int ret;
3505 struct extent_buffer *leaf;
3506 int slot;
e2fa7227 3507
5f39d397 3508 leaf = path->nodes[0];
44871b1b 3509 slot = path->slots[0];
74123bd7 3510
5f39d397 3511 nritems = btrfs_header_nritems(leaf);
123abc88 3512 data_end = leaf_data_end(root, leaf);
eb60ceac 3513
f25956cc 3514 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3326d1b0 3515 btrfs_print_leaf(root, leaf);
d397712b 3516 printk(KERN_CRIT "not enough freespace need %u have %d\n",
9c58309d 3517 total_size, btrfs_leaf_free_space(root, leaf));
be0e5c09 3518 BUG();
d4dbff95 3519 }
5f39d397 3520
be0e5c09 3521 if (slot != nritems) {
5f39d397 3522 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 3523
5f39d397
CM
3524 if (old_data < data_end) {
3525 btrfs_print_leaf(root, leaf);
d397712b 3526 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
5f39d397
CM
3527 slot, old_data, data_end);
3528 BUG_ON(1);
3529 }
be0e5c09
CM
3530 /*
3531 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3532 */
3533 /* first correct the data pointers */
db94535d 3534 WARN_ON(leaf->map_token);
0783fcfc 3535 for (i = slot; i < nritems; i++) {
5f39d397 3536 u32 ioff;
db94535d 3537
5f39d397 3538 item = btrfs_item_nr(leaf, i);
db94535d
CM
3539 if (!leaf->map_token) {
3540 map_extent_buffer(leaf, (unsigned long)item,
3541 sizeof(struct btrfs_item),
3542 &leaf->map_token, &leaf->kaddr,
3543 &leaf->map_start, &leaf->map_len,
3544 KM_USER1);
3545 }
3546
5f39d397 3547 ioff = btrfs_item_offset(leaf, item);
9c58309d 3548 btrfs_set_item_offset(leaf, item, ioff - total_data);
0783fcfc 3549 }
db94535d
CM
3550 if (leaf->map_token) {
3551 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3552 leaf->map_token = NULL;
3553 }
be0e5c09
CM
3554
3555 /* shift the items */
9c58309d 3556 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 3557 btrfs_item_nr_offset(slot),
d6025579 3558 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
3559
3560 /* shift the data */
5f39d397 3561 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
9c58309d 3562 data_end - total_data, btrfs_leaf_data(leaf) +
d6025579 3563 data_end, old_data - data_end);
be0e5c09
CM
3564 data_end = old_data;
3565 }
5f39d397 3566
62e2749e 3567 /* setup the item for the new data */
9c58309d
CM
3568 for (i = 0; i < nr; i++) {
3569 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3570 btrfs_set_item_key(leaf, &disk_key, slot + i);
3571 item = btrfs_item_nr(leaf, slot + i);
3572 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3573 data_end -= data_size[i];
3574 btrfs_set_item_size(leaf, item, data_size[i]);
3575 }
44871b1b 3576
9c58309d 3577 btrfs_set_header_nritems(leaf, nritems + nr);
aa5d6bed
CM
3578
3579 ret = 0;
5a01a2e3 3580 if (slot == 0) {
44871b1b 3581 struct btrfs_disk_key disk_key;
5a01a2e3 3582 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
e089f05c 3583 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
5a01a2e3 3584 }
b9473439
CM
3585 btrfs_unlock_up_safe(path, 1);
3586 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 3587
5f39d397
CM
3588 if (btrfs_leaf_free_space(root, leaf) < 0) {
3589 btrfs_print_leaf(root, leaf);
be0e5c09 3590 BUG();
5f39d397 3591 }
44871b1b
CM
3592 return ret;
3593}
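/*
 * Illustrative note (not part of the original file): setup_items_for_insert()
 * expects path to already point at the insertion slot of a leaf with at least
 * total_size bytes free.  btrfs_insert_empty_items() below arranges that by
 * calling btrfs_search_slot() with ins_len == total_size, which splits leaves
 * on the way down when necessary.
 */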
3594
3595/*
3596 * Given a key and some data, insert items into the tree.
3597 * This does all the path init required, making room in the tree if needed.
3598 */
3599int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3600 struct btrfs_root *root,
3601 struct btrfs_path *path,
3602 struct btrfs_key *cpu_key, u32 *data_size,
3603 int nr)
3604{
3605 struct extent_buffer *leaf;
3606 int ret = 0;
3607 int slot;
3608 int i;
3609 u32 total_size = 0;
3610 u32 total_data = 0;
3611
3612 for (i = 0; i < nr; i++)
3613 total_data += data_size[i];
3614
3615 total_size = total_data + (nr * sizeof(struct btrfs_item));
3616 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3617 if (ret == 0)
3618 return -EEXIST;
3619 if (ret < 0)
3620 goto out;
3621
3622 leaf = path->nodes[0];
3623 slot = path->slots[0];
3624 BUG_ON(slot < 0);
3625
3626 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3627 total_data, total_size, nr);
3628
ed2ff2cb 3629out:
62e2749e
CM
3630 return ret;
3631}
3632
3633/*
3634 * Given a key and some data, insert an item into the tree.
3635 * This does all the path init required, making room in the tree if needed.
3636 */
e089f05c
CM
3637int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3638 *root, struct btrfs_key *cpu_key, void *data, u32
3639 data_size)
62e2749e
CM
3640{
3641 int ret = 0;
2c90e5d6 3642 struct btrfs_path *path;
5f39d397
CM
3643 struct extent_buffer *leaf;
3644 unsigned long ptr;
62e2749e 3645
2c90e5d6
CM
3646 path = btrfs_alloc_path();
3647 BUG_ON(!path);
2c90e5d6 3648 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 3649 if (!ret) {
5f39d397
CM
3650 leaf = path->nodes[0];
3651 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3652 write_extent_buffer(leaf, data, ptr, data_size);
3653 btrfs_mark_buffer_dirty(leaf);
62e2749e 3654 }
2c90e5d6 3655 btrfs_free_path(path);
aa5d6bed 3656 return ret;
be0e5c09
CM
3657}
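/*
 * Illustrative sketch (not part of the original file): a typical caller of
 * btrfs_insert_item() above.  The wrapper name and the opaque blob payload
 * are made up for the example; real callers insert things like dir items or
 * root items.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      u64 objectid, u8 type, u64 offset,
					      void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	btrfs_set_key_type(&key, type);
	key.offset = offset;

	/* allocates a path, finds the slot, copies blob into the leaf */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}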
3658
74123bd7 3659/*
5de08d7d 3660 * delete the pointer from a given node.
74123bd7 3661 *
d352ac68
CM
3662 * the tree should have been previously balanced so the deletion does not
3663 * empty a node.
74123bd7 3664 */
e089f05c
CM
3665static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3666 struct btrfs_path *path, int level, int slot)
be0e5c09 3667{
5f39d397 3668 struct extent_buffer *parent = path->nodes[level];
7518a238 3669 u32 nritems;
aa5d6bed 3670 int ret = 0;
bb803951 3671 int wret;
be0e5c09 3672
5f39d397 3673 nritems = btrfs_header_nritems(parent);
d397712b 3674 if (slot != nritems - 1) {
5f39d397
CM
3675 memmove_extent_buffer(parent,
3676 btrfs_node_key_ptr_offset(slot),
3677 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
3678 sizeof(struct btrfs_key_ptr) *
3679 (nritems - slot - 1));
bb803951 3680 }
7518a238 3681 nritems--;
5f39d397 3682 btrfs_set_header_nritems(parent, nritems);
7518a238 3683 if (nritems == 0 && parent == root->node) {
5f39d397 3684 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 3685 /* just turn the root into a leaf and break */
5f39d397 3686 btrfs_set_header_level(root->node, 0);
bb803951 3687 } else if (slot == 0) {
5f39d397
CM
3688 struct btrfs_disk_key disk_key;
3689
3690 btrfs_node_key(parent, &disk_key, 0);
3691 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
0f70abe2
CM
3692 if (wret)
3693 ret = wret;
be0e5c09 3694 }
d6025579 3695 btrfs_mark_buffer_dirty(parent);
aa5d6bed 3696 return ret;
be0e5c09
CM
3697}
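/*
 * Illustrative note (not part of the original file): with nritems == 5 and
 * slot == 2, the memmove above copies key_ptrs 3 and 4 down over slot 2,
 * nritems drops to 4, and had slot been 0 the node's new first key would be
 * pushed up into the parent via fixup_low_keys().
 */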
3698
323ac95b
CM
3699/*
3700 * a helper function to delete the leaf pointed to by path->slots[1] in
5d4f98a2 3701 * path->nodes[1].
323ac95b
CM
3702 *
3703 * This deletes the pointer in path->nodes[1] and frees the leaf
3704 * block extent. zero is returned if it all worked out, < 0 otherwise.
3705 *
3706 * The path must have already been setup for deleting the leaf, including
3707 * all the proper balancing. path->nodes[1] must be locked.
3708 */
5d4f98a2
YZ
3709static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3710 struct btrfs_root *root,
3711 struct btrfs_path *path,
3712 struct extent_buffer *leaf)
323ac95b
CM
3713{
3714 int ret;
323ac95b 3715
5d4f98a2 3716 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
323ac95b
CM
3717 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3718 if (ret)
3719 return ret;
3720
4d081c41
CM
3721 /*
3722 * btrfs_free_extent is expensive, we want to make sure we
3723 * aren't holding any locks when we call it
3724 */
3725 btrfs_unlock_up_safe(path, 0);
3726
5d4f98a2
YZ
3727 ret = btrfs_free_extent(trans, root, leaf->start, leaf->len,
3728 0, root->root_key.objectid, 0, 0);
323ac95b
CM
3729 return ret;
3730}
74123bd7
CM
3731/*
3732 * delete the item at the leaf level in path. If that empties
3733 * the leaf, remove it from the tree
3734 */
85e21bac
CM
3735int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3736 struct btrfs_path *path, int slot, int nr)
be0e5c09 3737{
5f39d397
CM
3738 struct extent_buffer *leaf;
3739 struct btrfs_item *item;
85e21bac
CM
3740 int last_off;
3741 int dsize = 0;
aa5d6bed
CM
3742 int ret = 0;
3743 int wret;
85e21bac 3744 int i;
7518a238 3745 u32 nritems;
be0e5c09 3746
5f39d397 3747 leaf = path->nodes[0];
85e21bac
CM
3748 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3749
3750 for (i = 0; i < nr; i++)
3751 dsize += btrfs_item_size_nr(leaf, slot + i);
3752
5f39d397 3753 nritems = btrfs_header_nritems(leaf);
be0e5c09 3754
85e21bac 3755 if (slot + nr != nritems) {
123abc88 3756 int data_end = leaf_data_end(root, leaf);
5f39d397
CM
3757
3758 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
d6025579
CM
3759 data_end + dsize,
3760 btrfs_leaf_data(leaf) + data_end,
85e21bac 3761 last_off - data_end);
5f39d397 3762
85e21bac 3763 for (i = slot + nr; i < nritems; i++) {
5f39d397 3764 u32 ioff;
db94535d 3765
5f39d397 3766 item = btrfs_item_nr(leaf, i);
db94535d
CM
3767 if (!leaf->map_token) {
3768 map_extent_buffer(leaf, (unsigned long)item,
3769 sizeof(struct btrfs_item),
3770 &leaf->map_token, &leaf->kaddr,
3771 &leaf->map_start, &leaf->map_len,
3772 KM_USER1);
3773 }
5f39d397
CM
3774 ioff = btrfs_item_offset(leaf, item);
3775 btrfs_set_item_offset(leaf, item, ioff + dsize);
0783fcfc 3776 }
db94535d
CM
3777
3778 if (leaf->map_token) {
3779 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3780 leaf->map_token = NULL;
3781 }
3782
5f39d397 3783 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 3784 btrfs_item_nr_offset(slot + nr),
d6025579 3785 sizeof(struct btrfs_item) *
85e21bac 3786 (nritems - slot - nr));
be0e5c09 3787 }
85e21bac
CM
3788 btrfs_set_header_nritems(leaf, nritems - nr);
3789 nritems -= nr;
5f39d397 3790
74123bd7 3791 /* delete the leaf if we've emptied it */
7518a238 3792 if (nritems == 0) {
5f39d397
CM
3793 if (leaf == root->node) {
3794 btrfs_set_header_level(leaf, 0);
9a8dd150 3795 } else {
5d4f98a2 3796 ret = btrfs_del_leaf(trans, root, path, leaf);
323ac95b 3797 BUG_ON(ret);
9a8dd150 3798 }
be0e5c09 3799 } else {
7518a238 3800 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 3801 if (slot == 0) {
5f39d397
CM
3802 struct btrfs_disk_key disk_key;
3803
3804 btrfs_item_key(leaf, &disk_key, 0);
e089f05c 3805 wret = fixup_low_keys(trans, root, path,
5f39d397 3806 &disk_key, 1);
aa5d6bed
CM
3807 if (wret)
3808 ret = wret;
3809 }
aa5d6bed 3810
74123bd7 3811 /* delete the leaf if it is mostly empty */
a4b6e07d
CM
3812 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4 &&
3813 !trans->transaction->delayed_refs.flushing) {
be0e5c09
CM
3814 /* push_leaf_left fixes the path.
3815 * make sure the path still points to our leaf
3816 * for a possible call to btrfs_del_leaf below
3817 */
4920c9ac 3818 slot = path->slots[1];
5f39d397
CM
3819 extent_buffer_get(leaf);
3820
b9473439 3821 btrfs_set_path_blocking(path);
85e21bac 3822 wret = push_leaf_left(trans, root, path, 1, 1);
54aa1f4d 3823 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 3824 ret = wret;
5f39d397
CM
3825
3826 if (path->nodes[0] == leaf &&
3827 btrfs_header_nritems(leaf)) {
85e21bac 3828 wret = push_leaf_right(trans, root, path, 1, 1);
54aa1f4d 3829 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
3830 ret = wret;
3831 }
5f39d397
CM
3832
3833 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 3834 path->slots[1] = slot;
5d4f98a2 3835 ret = btrfs_del_leaf(trans, root, path, leaf);
323ac95b 3836 BUG_ON(ret);
5f39d397 3837 free_extent_buffer(leaf);
5de08d7d 3838 } else {
925baedd
CM
3839 /* if the leaf is still in the path, make sure
3840 * it's dirty. Otherwise, one of the
3841 * push_leaf functions must have already
3842 * dirtied this buffer
3843 */
3844 if (path->nodes[0] == leaf)
3845 btrfs_mark_buffer_dirty(leaf);
5f39d397 3846 free_extent_buffer(leaf);
be0e5c09 3847 }
d5719762 3848 } else {
5f39d397 3849 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
3850 }
3851 }
aa5d6bed 3852 return ret;
be0e5c09
CM
3853}
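/*
 * Illustrative sketch (not part of the original file): deleting a single
 * item with btrfs_del_items() above.  The wrapper name is made up; it
 * assumes 'key' identifies exactly one existing item.
 */
static int __maybe_unused example_delete_one(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len of -1 tells the search we intend to delete, cow == 1 */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}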
3854
7bb86316 3855/*
925baedd 3856 * search the tree again to find a leaf with lesser keys
7bb86316
CM
3857 * returns 0 if it found something or 1 if there are no lesser leaves.
3858 * returns < 0 on io errors.
d352ac68
CM
3859 *
3860 * This may release the path, and so you may lose any locks held at the
3861 * time you call it.
7bb86316
CM
3862 */
3863int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3864{
925baedd
CM
3865 struct btrfs_key key;
3866 struct btrfs_disk_key found_key;
3867 int ret;
7bb86316 3868
925baedd 3869 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 3870
925baedd
CM
3871 if (key.offset > 0)
3872 key.offset--;
3873 else if (key.type > 0)
3874 key.type--;
3875 else if (key.objectid > 0)
3876 key.objectid--;
3877 else
3878 return 1;
7bb86316 3879
925baedd
CM
3880 btrfs_release_path(root, path);
3881 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3882 if (ret < 0)
3883 return ret;
3884 btrfs_item_key(path->nodes[0], &found_key, 0);
3885 ret = comp_keys(&found_key, &key);
3886 if (ret < 0)
3887 return 0;
3888 return 1;
7bb86316
CM
3889}
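/*
 * Illustrative note (not part of the original file): the decrement above
 * builds a key strictly smaller than the first key of the current leaf,
 * borrowing from type and then objectid once offset is already 0.  The
 * fresh search then lands at or before that key, and comparing the first
 * key of the resulting leaf tells us whether a lesser leaf really exists.
 */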
3890
3f157a2f
CM
3891/*
3892 * A helper function to walk down the tree starting at min_key, looking
3893 * for nodes or leaves that are either in cache or have a minimum
d352ac68 3894 * transaction id. This is used by the btree defrag code and tree logging.
3f157a2f
CM
3895 *
3896 * This does not cow, but it does stuff the starting key it finds back
3897 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3898 * key and get a writable path.
3899 *
3900 * This does lock as it descends, and path->keep_locks should be set
3901 * to 1 by the caller.
3902 *
3903 * This honors path->lowest_level to prevent descent past a given level
3904 * of the tree.
3905 *
d352ac68
CM
3906 * min_trans indicates the oldest transaction that you are interested
3907 * in walking through. Any nodes or leaves older than min_trans are
3908 * skipped over (without reading them).
3909 *
3f157a2f
CM
3910 * returns zero if something useful was found, < 0 on error and 1 if there
3911 * was nothing in the tree that matched the search criteria.
3912 */
3913int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
e02119d5 3914 struct btrfs_key *max_key,
3f157a2f
CM
3915 struct btrfs_path *path, int cache_only,
3916 u64 min_trans)
3917{
3918 struct extent_buffer *cur;
3919 struct btrfs_key found_key;
3920 int slot;
9652480b 3921 int sret;
3f157a2f
CM
3922 u32 nritems;
3923 int level;
3924 int ret = 1;
3925
934d375b 3926 WARN_ON(!path->keep_locks);
3f157a2f
CM
3927again:
3928 cur = btrfs_lock_root_node(root);
3929 level = btrfs_header_level(cur);
e02119d5 3930 WARN_ON(path->nodes[level]);
3f157a2f
CM
3931 path->nodes[level] = cur;
3932 path->locks[level] = 1;
3933
3934 if (btrfs_header_generation(cur) < min_trans) {
3935 ret = 1;
3936 goto out;
3937 }
d397712b 3938 while (1) {
3f157a2f
CM
3939 nritems = btrfs_header_nritems(cur);
3940 level = btrfs_header_level(cur);
9652480b 3941 sret = bin_search(cur, min_key, level, &slot);
3f157a2f 3942
323ac95b
CM
3943 /* at the lowest level, we're done, setup the path and exit */
3944 if (level == path->lowest_level) {
e02119d5
CM
3945 if (slot >= nritems)
3946 goto find_next_key;
3f157a2f
CM
3947 ret = 0;
3948 path->slots[level] = slot;
3949 btrfs_item_key_to_cpu(cur, &found_key, slot);
3950 goto out;
3951 }
9652480b
Y
3952 if (sret && slot > 0)
3953 slot--;
3f157a2f
CM
3954 /*
3955 * check this node pointer against the cache_only and
3956 * min_trans parameters. If it isn't in cache or is too
3957 * old, skip to the next one.
3958 */
d397712b 3959 while (slot < nritems) {
3f157a2f
CM
3960 u64 blockptr;
3961 u64 gen;
3962 struct extent_buffer *tmp;
e02119d5
CM
3963 struct btrfs_disk_key disk_key;
3964
3f157a2f
CM
3965 blockptr = btrfs_node_blockptr(cur, slot);
3966 gen = btrfs_node_ptr_generation(cur, slot);
3967 if (gen < min_trans) {
3968 slot++;
3969 continue;
3970 }
3971 if (!cache_only)
3972 break;
3973
e02119d5
CM
3974 if (max_key) {
3975 btrfs_node_key(cur, &disk_key, slot);
3976 if (comp_keys(&disk_key, max_key) >= 0) {
3977 ret = 1;
3978 goto out;
3979 }
3980 }
3981
3f157a2f
CM
3982 tmp = btrfs_find_tree_block(root, blockptr,
3983 btrfs_level_size(root, level - 1));
3984
3985 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
3986 free_extent_buffer(tmp);
3987 break;
3988 }
3989 if (tmp)
3990 free_extent_buffer(tmp);
3991 slot++;
3992 }
e02119d5 3993find_next_key:
3f157a2f
CM
3994 /*
3995 * we didn't find a candidate key in this node, walk forward
3996 * and find another one
3997 */
3998 if (slot >= nritems) {
e02119d5 3999 path->slots[level] = slot;
b4ce94de 4000 btrfs_set_path_blocking(path);
e02119d5 4001 sret = btrfs_find_next_key(root, path, min_key, level,
3f157a2f 4002 cache_only, min_trans);
e02119d5 4003 if (sret == 0) {
3f157a2f
CM
4004 btrfs_release_path(root, path);
4005 goto again;
4006 } else {
4007 goto out;
4008 }
4009 }
4010 /* save our key for returning back */
4011 btrfs_node_key_to_cpu(cur, &found_key, slot);
4012 path->slots[level] = slot;
4013 if (level == path->lowest_level) {
4014 ret = 0;
4015 unlock_up(path, level, 1);
4016 goto out;
4017 }
b4ce94de 4018 btrfs_set_path_blocking(path);
3f157a2f
CM
4019 cur = read_node_slot(root, cur, slot);
4020
4021 btrfs_tree_lock(cur);
b4ce94de 4022
3f157a2f
CM
4023 path->locks[level - 1] = 1;
4024 path->nodes[level - 1] = cur;
4025 unlock_up(path, level, 1);
4008c04a 4026 btrfs_clear_path_blocking(path, NULL);
3f157a2f
CM
4027 }
4028out:
4029 if (ret == 0)
4030 memcpy(min_key, &found_key, sizeof(found_key));
b4ce94de 4031 btrfs_set_path_blocking(path);
3f157a2f
CM
4032 return ret;
4033}
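/*
 * Illustrative sketch (not part of the original file): scanning every key
 * reachable through nodes newer than 'min_trans' with btrfs_search_forward()
 * above, roughly the shape of the defrag and tree-log callers.  The wrapper
 * and the process_key() callback are made up, and the key advance is
 * simplified to bumping the offset.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
						  u64 min_trans,
						  int (*process_key)(struct btrfs_key *key))
{
	struct btrfs_path *path;
	struct btrfs_key min_key = { .objectid = 0, .type = 0, .offset = 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* btrfs_search_forward() requires keep_locks, see the comment above */
	path->keep_locks = 1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, NULL, path,
					   0, min_trans);
		if (ret)	/* 1 means nothing left, < 0 means error */
			break;

		ret = process_key(&min_key);
		btrfs_release_path(root, path);
		if (ret)
			break;

		if (min_key.offset == (u64)-1)
			break;
		min_key.offset++;
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}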
4034
4035/*
4036 * this is similar to btrfs_next_leaf, but does not try to preserve
4037 * and fixup the path. It looks for and returns the next key in the
4038 * tree based on the current path and the cache_only and min_trans
4039 * parameters.
4040 *
4041 * 0 is returned if another key is found, < 0 if there are any errors
4042 * and 1 is returned if there are no higher keys in the tree
4043 *
4044 * path->keep_locks should be set to 1 on the search made before
4045 * calling this function.
4046 */
e7a84565 4047int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
3f157a2f
CM
4048 struct btrfs_key *key, int lowest_level,
4049 int cache_only, u64 min_trans)
e7a84565
CM
4050{
4051 int level = lowest_level;
4052 int slot;
4053 struct extent_buffer *c;
4054
934d375b 4055 WARN_ON(!path->keep_locks);
d397712b 4056 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
4057 if (!path->nodes[level])
4058 return 1;
4059
4060 slot = path->slots[level] + 1;
4061 c = path->nodes[level];
3f157a2f 4062next:
e7a84565
CM
4063 if (slot >= btrfs_header_nritems(c)) {
4064 level++;
d397712b 4065 if (level == BTRFS_MAX_LEVEL)
e7a84565 4066 return 1;
e7a84565
CM
4067 continue;
4068 }
4069 if (level == 0)
4070 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f
CM
4071 else {
4072 u64 blockptr = btrfs_node_blockptr(c, slot);
4073 u64 gen = btrfs_node_ptr_generation(c, slot);
4074
4075 if (cache_only) {
4076 struct extent_buffer *cur;
4077 cur = btrfs_find_tree_block(root, blockptr,
4078 btrfs_level_size(root, level - 1));
4079 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4080 slot++;
4081 if (cur)
4082 free_extent_buffer(cur);
4083 goto next;
4084 }
4085 free_extent_buffer(cur);
4086 }
4087 if (gen < min_trans) {
4088 slot++;
4089 goto next;
4090 }
e7a84565 4091 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 4092 }
e7a84565
CM
4093 return 0;
4094 }
4095 return 1;
4096}
4097
97571fd0 4098/*
925baedd 4099 * search the tree again to find a leaf with greater keys
0f70abe2
CM
4100 * returns 0 if it found something or 1 if there are no greater leaves.
4101 * returns < 0 on io errors.
97571fd0 4102 */
234b63a0 4103int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
d97e63b6
CM
4104{
4105 int slot;
8e73f275 4106 int level;
5f39d397 4107 struct extent_buffer *c;
8e73f275 4108 struct extent_buffer *next;
925baedd
CM
4109 struct btrfs_key key;
4110 u32 nritems;
4111 int ret;
8e73f275
CM
4112 int old_spinning = path->leave_spinning;
4113 int force_blocking = 0;
925baedd
CM
4114
4115 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 4116 if (nritems == 0)
925baedd 4117 return 1;
925baedd 4118
8e73f275
CM
4119 /*
4120 * we take the blocks in an order that upsets lockdep. Using
4121 * blocking mode is the only way around it.
4122 */
4123#ifdef CONFIG_DEBUG_LOCK_ALLOC
4124 force_blocking = 1;
4125#endif
925baedd 4126
8e73f275
CM
4127 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4128again:
4129 level = 1;
4130 next = NULL;
925baedd 4131 btrfs_release_path(root, path);
8e73f275 4132
a2135011 4133 path->keep_locks = 1;
8e73f275
CM
4134
4135 if (!force_blocking)
4136 path->leave_spinning = 1;
4137
925baedd
CM
4138 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4139 path->keep_locks = 0;
4140
4141 if (ret < 0)
4142 return ret;
4143
a2135011 4144 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
4145 /*
4146 * by releasing the path above we dropped all our locks. A balance
4147 * could have added more items next to the key that used to be
4148 * at the very end of the block. So, check again here and
4149 * advance the path if there are now more items available.
4150 */
a2135011 4151 if (nritems > 0 && path->slots[0] < nritems - 1) {
168fd7d2 4152 path->slots[0]++;
8e73f275 4153 ret = 0;
925baedd
CM
4154 goto done;
4155 }
d97e63b6 4156
d397712b 4157 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
4158 if (!path->nodes[level]) {
4159 ret = 1;
4160 goto done;
4161 }
5f39d397 4162
d97e63b6
CM
4163 slot = path->slots[level] + 1;
4164 c = path->nodes[level];
5f39d397 4165 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 4166 level++;
8e73f275
CM
4167 if (level == BTRFS_MAX_LEVEL) {
4168 ret = 1;
4169 goto done;
4170 }
d97e63b6
CM
4171 continue;
4172 }
5f39d397 4173
925baedd
CM
4174 if (next) {
4175 btrfs_tree_unlock(next);
5f39d397 4176 free_extent_buffer(next);
925baedd 4177 }
5f39d397 4178
8e73f275
CM
4179 next = c;
4180 ret = read_block_for_search(NULL, root, path, &next, level,
4181 slot, &key);
4182 if (ret == -EAGAIN)
4183 goto again;
5f39d397 4184
76a05b35
CM
4185 if (ret < 0) {
4186 btrfs_release_path(root, path);
4187 goto done;
4188 }
4189
5cd57b2c 4190 if (!path->skip_locking) {
8e73f275
CM
4191 ret = btrfs_try_spin_lock(next);
4192 if (!ret) {
4193 btrfs_set_path_blocking(path);
4194 btrfs_tree_lock(next);
4195 if (!force_blocking)
4196 btrfs_clear_path_blocking(path, next);
4197 }
4198 if (force_blocking)
4199 btrfs_set_lock_blocking(next);
5cd57b2c 4200 }
d97e63b6
CM
4201 break;
4202 }
4203 path->slots[level] = slot;
d397712b 4204 while (1) {
d97e63b6
CM
4205 level--;
4206 c = path->nodes[level];
925baedd
CM
4207 if (path->locks[level])
4208 btrfs_tree_unlock(c);
8e73f275 4209
5f39d397 4210 free_extent_buffer(c);
d97e63b6
CM
4211 path->nodes[level] = next;
4212 path->slots[level] = 0;
a74a4b97
CM
4213 if (!path->skip_locking)
4214 path->locks[level] = 1;
8e73f275 4215
d97e63b6
CM
4216 if (!level)
4217 break;
b4ce94de 4218
8e73f275
CM
4219 ret = read_block_for_search(NULL, root, path, &next, level,
4220 0, &key);
4221 if (ret == -EAGAIN)
4222 goto again;
4223
76a05b35
CM
4224 if (ret < 0) {
4225 btrfs_release_path(root, path);
4226 goto done;
4227 }
4228
5cd57b2c 4229 if (!path->skip_locking) {
b9447ef8 4230 btrfs_assert_tree_locked(path->nodes[level]);
8e73f275
CM
4231 ret = btrfs_try_spin_lock(next);
4232 if (!ret) {
4233 btrfs_set_path_blocking(path);
4234 btrfs_tree_lock(next);
4235 if (!force_blocking)
4236 btrfs_clear_path_blocking(path, next);
4237 }
4238 if (force_blocking)
4239 btrfs_set_lock_blocking(next);
5cd57b2c 4240 }
d97e63b6 4241 }
8e73f275 4242 ret = 0;
925baedd
CM
4243done:
4244 unlock_up(path, 0, 1);
8e73f275
CM
4245 path->leave_spinning = old_spinning;
4246 if (!old_spinning)
4247 btrfs_set_path_blocking(path);
4248
4249 return ret;
d97e63b6 4250}
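/*
 * Illustrative sketch (not part of the original file): the usual forward
 * iteration pattern built on btrfs_next_leaf() above.  The wrapper and the
 * handle_item() callback are made up for the example.
 */
static int __maybe_unused example_iterate_from(struct btrfs_root *root,
					       struct btrfs_key *start,
					       int (*handle_item)(struct extent_buffer *leaf,
								  int slot))
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 means no more leaves */
				break;
			continue;
		}
		ret = handle_item(path->nodes[0], path->slots[0]);
		if (ret)
			break;
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}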
0b86a832 4251
3f157a2f
CM
4252/*
4253 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4254 * searching until it gets past min_objectid or finds an item of 'type'
4255 *
4256 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4257 */
0b86a832
CM
4258int btrfs_previous_item(struct btrfs_root *root,
4259 struct btrfs_path *path, u64 min_objectid,
4260 int type)
4261{
4262 struct btrfs_key found_key;
4263 struct extent_buffer *leaf;
e02119d5 4264 u32 nritems;
0b86a832
CM
4265 int ret;
4266
d397712b 4267 while (1) {
0b86a832 4268 if (path->slots[0] == 0) {
b4ce94de 4269 btrfs_set_path_blocking(path);
0b86a832
CM
4270 ret = btrfs_prev_leaf(root, path);
4271 if (ret != 0)
4272 return ret;
4273 } else {
4274 path->slots[0]--;
4275 }
4276 leaf = path->nodes[0];
e02119d5
CM
4277 nritems = btrfs_header_nritems(leaf);
4278 if (nritems == 0)
4279 return 1;
4280 if (path->slots[0] == nritems)
4281 path->slots[0]--;
4282
0b86a832
CM
4283 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4284 if (found_key.type == type)
4285 return 0;
e02119d5
CM
4286 if (found_key.objectid < min_objectid)
4287 break;
4288 if (found_key.objectid == min_objectid &&
4289 found_key.type < type)
4290 break;
0b86a832
CM
4291 }
4292 return 1;
4293}
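/*
 * Illustrative sketch (not part of the original file): a typical use of
 * btrfs_previous_item() above, walking back from a search position to the
 * closest preceding item of a given type.  The wrapper name is made up.
 */
static int __maybe_unused example_find_previous(struct btrfs_root *root,
						struct btrfs_path *path,
						struct btrfs_key *search_key,
						int type)
{
	int ret;

	ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* returns 0 if an item of 'type' was found, 1 if we walked past it */
	return btrfs_previous_item(root, path, search_key->objectid, type);
}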