/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (p->nodes[i] && p->locks[i])
			btrfs_set_lock_blocking(p->nodes[i]);
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held)
		btrfs_set_lock_blocking(held);
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i])
			btrfs_clear_lock_blocking(p->nodes[i]);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking(held);
#endif
}

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock(p->nodes[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);
	rcu_read_unlock();
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow, &last_ref);

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 0;
	return 1;
}

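/*
 * Illustration (not part of the original file): the COW is skipped only
 * when all three checks above hold, e.g. for a block that was already
 * cowed earlier in this same transaction (generation == trans->transid),
 * has not been written out yet (WRITTEN flag clear), and is not a RELOC
 * flagged block being modified from a non-relocation tree.  Any block
 * carried over from an older transaction must be cowed again before it
 * can be changed.
 */
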
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}

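/*
 * Illustration (not part of the original file): with a 4K blocksize,
 * blocknr = 100K and other = 120K count as close because the gap between
 * the end of the first block (104K) and the start of the second (120K)
 * is 16K, under the 32K threshold; other = 200K would not be close.
 */
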
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}

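/*
 * Illustration (not part of the original file): keys compare by objectid
 * first, then type, then offset.  So (256, BTRFS_INODE_ITEM_KEY, 0) sorts
 * before (256, BTRFS_DIR_ITEM_KEY, 123) because the inode item type is
 * numerically smaller, and both sort before any key with objectid 257.
 */
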
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
						      blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}

/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}

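/*
 * Illustration (not part of the original file): item headers grow from the
 * front of the leaf while item data grows from the back, so a leaf holding
 * two items with 100 and 50 bytes of data has its data end at
 * BTRFS_LEAF_DATA_SIZE(root) - 150, which is exactly the offset recorded
 * in the last item's header.
 */
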
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
					sizeof(struct btrfs_disk_key),
					&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

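/*
 * Illustration (not part of the original file): for a block whose keys are
 * {5, 10, 20}, searching for 10 returns 0 with *slot = 1, while searching
 * for 15 returns 1 with *slot = 2, the position where 15 would have to be
 * inserted to keep the keys sorted.
 */
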
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
	return -1;
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}

static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}

/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
			       btrfs_level_size(root, level - 1),
			       btrfs_node_ptr_generation(parent, slot));
}

/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(!path->locks[level]);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1)
		parent = path->nodes[level + 1];
	pslot = path->slots[level + 1];

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			wret = del_ptr(trans, root, path, level + 1,
				       pslot + 1);
			if (wret)
				ret = wret;
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}

d352ac68
CM
1052/* Node balancing for insertion. Here we only split or push nodes around
1053 * when they are completely full. This is also done top down, so we
1054 * have to be pessimistic.
1055 */
d397712b 1056static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
1057 struct btrfs_root *root,
1058 struct btrfs_path *path, int level)
e66f709b 1059{
5f39d397
CM
1060 struct extent_buffer *right = NULL;
1061 struct extent_buffer *mid;
1062 struct extent_buffer *left = NULL;
1063 struct extent_buffer *parent = NULL;
e66f709b
CM
1064 int ret = 0;
1065 int wret;
1066 int pslot;
1067 int orig_slot = path->slots[level];
e66f709b
CM
1068
1069 if (level == 0)
1070 return 1;
1071
5f39d397 1072 mid = path->nodes[level];
7bb86316 1073 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b
CM
1074
1075 if (level < BTRFS_MAX_LEVEL - 1)
5f39d397 1076 parent = path->nodes[level + 1];
e66f709b
CM
1077 pslot = path->slots[level + 1];
1078
5f39d397 1079 if (!parent)
e66f709b 1080 return 1;
e66f709b 1081
5f39d397 1082 left = read_node_slot(root, parent, pslot - 1);
e66f709b
CM
1083
1084 /* first, try to make some room in the middle buffer */
5f39d397 1085 if (left) {
e66f709b 1086 u32 left_nr;
925baedd
CM
1087
1088 btrfs_tree_lock(left);
b4ce94de
CM
1089 btrfs_set_lock_blocking(left);
1090
5f39d397 1091 left_nr = btrfs_header_nritems(left);
33ade1f8
CM
1092 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1093 wret = 1;
1094 } else {
5f39d397 1095 ret = btrfs_cow_block(trans, root, left, parent,
9fa8cfe7 1096 pslot - 1, &left);
54aa1f4d
CM
1097 if (ret)
1098 wret = 1;
1099 else {
54aa1f4d 1100 wret = push_node_left(trans, root,
971a1f66 1101 left, mid, 0);
54aa1f4d 1102 }
33ade1f8 1103 }
e66f709b
CM
1104 if (wret < 0)
1105 ret = wret;
1106 if (wret == 0) {
5f39d397 1107 struct btrfs_disk_key disk_key;
e66f709b 1108 orig_slot += left_nr;
5f39d397
CM
1109 btrfs_node_key(mid, &disk_key, 0);
1110 btrfs_set_node_key(parent, &disk_key, pslot);
1111 btrfs_mark_buffer_dirty(parent);
1112 if (btrfs_header_nritems(left) > orig_slot) {
1113 path->nodes[level] = left;
e66f709b
CM
1114 path->slots[level + 1] -= 1;
1115 path->slots[level] = orig_slot;
925baedd 1116 btrfs_tree_unlock(mid);
5f39d397 1117 free_extent_buffer(mid);
e66f709b
CM
1118 } else {
1119 orig_slot -=
5f39d397 1120 btrfs_header_nritems(left);
e66f709b 1121 path->slots[level] = orig_slot;
925baedd 1122 btrfs_tree_unlock(left);
5f39d397 1123 free_extent_buffer(left);
e66f709b 1124 }
e66f709b
CM
1125 return 0;
1126 }
925baedd 1127 btrfs_tree_unlock(left);
5f39d397 1128 free_extent_buffer(left);
e66f709b 1129 }
925baedd 1130 right = read_node_slot(root, parent, pslot + 1);
e66f709b
CM
1131
1132 /*
1133 * then try to empty the right most buffer into the middle
1134 */
5f39d397 1135 if (right) {
33ade1f8 1136 u32 right_nr;
b4ce94de 1137
925baedd 1138 btrfs_tree_lock(right);
b4ce94de
CM
1139 btrfs_set_lock_blocking(right);
1140
5f39d397 1141 right_nr = btrfs_header_nritems(right);
33ade1f8
CM
1142 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1143 wret = 1;
1144 } else {
5f39d397
CM
1145 ret = btrfs_cow_block(trans, root, right,
1146 parent, pslot + 1,
9fa8cfe7 1147 &right);
54aa1f4d
CM
1148 if (ret)
1149 wret = 1;
1150 else {
54aa1f4d 1151 wret = balance_node_right(trans, root,
5f39d397 1152 right, mid);
54aa1f4d 1153 }
33ade1f8 1154 }
e66f709b
CM
1155 if (wret < 0)
1156 ret = wret;
1157 if (wret == 0) {
5f39d397
CM
1158 struct btrfs_disk_key disk_key;
1159
1160 btrfs_node_key(right, &disk_key, 0);
1161 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1162 btrfs_mark_buffer_dirty(parent);
1163
1164 if (btrfs_header_nritems(mid) <= orig_slot) {
1165 path->nodes[level] = right;
e66f709b
CM
1166 path->slots[level + 1] += 1;
1167 path->slots[level] = orig_slot -
5f39d397 1168 btrfs_header_nritems(mid);
925baedd 1169 btrfs_tree_unlock(mid);
5f39d397 1170 free_extent_buffer(mid);
e66f709b 1171 } else {
925baedd 1172 btrfs_tree_unlock(right);
5f39d397 1173 free_extent_buffer(right);
e66f709b 1174 }
e66f709b
CM
1175 return 0;
1176 }
925baedd 1177 btrfs_tree_unlock(right);
5f39d397 1178 free_extent_buffer(right);
e66f709b 1179 }
e66f709b
CM
1180 return 1;
1181}
1182
3c69faec 1183/*
d352ac68
CM
1184 * readahead one full node of leaves, finding things that are close
1185 * to the block in 'slot', and triggering ra on them.
3c69faec 1186 */
c8c42864
CM
1187static void reada_for_search(struct btrfs_root *root,
1188 struct btrfs_path *path,
1189 int level, int slot, u64 objectid)
3c69faec 1190{
5f39d397 1191 struct extent_buffer *node;
01f46658 1192 struct btrfs_disk_key disk_key;
3c69faec 1193 u32 nritems;
3c69faec 1194 u64 search;
a7175319 1195 u64 target;
6b80053d 1196 u64 nread = 0;
cb25c2ea 1197 u64 gen;
3c69faec 1198 int direction = path->reada;
5f39d397 1199 struct extent_buffer *eb;
6b80053d
CM
1200 u32 nr;
1201 u32 blocksize;
1202 u32 nscan = 0;
db94535d 1203
a6b6e75e 1204 if (level != 1)
6702ed49
CM
1205 return;
1206
1207 if (!path->nodes[level])
3c69faec
CM
1208 return;
1209
5f39d397 1210 node = path->nodes[level];
925baedd 1211
3c69faec 1212 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
1213 blocksize = btrfs_level_size(root, level - 1);
1214 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
1215 if (eb) {
1216 free_extent_buffer(eb);
3c69faec
CM
1217 return;
1218 }
1219
a7175319 1220 target = search;
6b80053d 1221
5f39d397 1222 nritems = btrfs_header_nritems(node);
6b80053d 1223 nr = slot;
25b8b936 1224
d397712b 1225 while (1) {
6b80053d
CM
1226 if (direction < 0) {
1227 if (nr == 0)
1228 break;
1229 nr--;
1230 } else if (direction > 0) {
1231 nr++;
1232 if (nr >= nritems)
1233 break;
3c69faec 1234 }
01f46658
CM
1235 if (path->reada < 0 && objectid) {
1236 btrfs_node_key(node, &disk_key, nr);
1237 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1238 break;
1239 }
6b80053d 1240 search = btrfs_node_blockptr(node, nr);
a7175319
CM
1241 if ((search <= target && target - search <= 65536) ||
1242 (search > target && search - target <= 65536)) {
cb25c2ea 1243 gen = btrfs_node_ptr_generation(node, nr);
cb25c2ea 1244 readahead_tree_block(root, search, blocksize, gen);
6b80053d
CM
1245 nread += blocksize;
1246 }
1247 nscan++;
a7175319 1248 if ((nread > 65536 || nscan > 32))
6b80053d 1249 break;
3c69faec
CM
1250 }
1251}
925baedd 1252
b4ce94de
CM
1253/*
1254 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1255 * cache
1256 */
1257static noinline int reada_for_balance(struct btrfs_root *root,
1258 struct btrfs_path *path, int level)
1259{
1260 int slot;
1261 int nritems;
1262 struct extent_buffer *parent;
1263 struct extent_buffer *eb;
1264 u64 gen;
1265 u64 block1 = 0;
1266 u64 block2 = 0;
1267 int ret = 0;
1268 int blocksize;
1269
8c594ea8 1270 parent = path->nodes[level + 1];
b4ce94de
CM
1271 if (!parent)
1272 return 0;
1273
1274 nritems = btrfs_header_nritems(parent);
8c594ea8 1275 slot = path->slots[level + 1];
b4ce94de
CM
1276 blocksize = btrfs_level_size(root, level);
1277
1278 if (slot > 0) {
1279 block1 = btrfs_node_blockptr(parent, slot - 1);
1280 gen = btrfs_node_ptr_generation(parent, slot - 1);
1281 eb = btrfs_find_tree_block(root, block1, blocksize);
1282 if (eb && btrfs_buffer_uptodate(eb, gen))
1283 block1 = 0;
1284 free_extent_buffer(eb);
1285 }
8c594ea8 1286 if (slot + 1 < nritems) {
b4ce94de
CM
1287 block2 = btrfs_node_blockptr(parent, slot + 1);
1288 gen = btrfs_node_ptr_generation(parent, slot + 1);
1289 eb = btrfs_find_tree_block(root, block2, blocksize);
1290 if (eb && btrfs_buffer_uptodate(eb, gen))
1291 block2 = 0;
1292 free_extent_buffer(eb);
1293 }
1294 if (block1 || block2) {
1295 ret = -EAGAIN;
8c594ea8
CM
1296
1297 /* release the whole path */
b3b4aa74 1298 btrfs_release_path(path);
8c594ea8
CM
1299
1300 /* read the blocks */
b4ce94de
CM
1301 if (block1)
1302 readahead_tree_block(root, block1, blocksize, 0);
1303 if (block2)
1304 readahead_tree_block(root, block2, blocksize, 0);
1305
1306 if (block1) {
1307 eb = read_tree_block(root, block1, blocksize, 0);
1308 free_extent_buffer(eb);
1309 }
8c594ea8 1310 if (block2) {
b4ce94de
CM
1311 eb = read_tree_block(root, block2, blocksize, 0);
1312 free_extent_buffer(eb);
1313 }
1314 }
1315 return ret;
1316}
1317
1318
d352ac68 1319/*
d397712b
CM
1320 * when we walk down the tree, it is usually safe to unlock the higher layers
1321 * in the tree. The exceptions are when our path goes through slot 0, because
1322 * operations on the tree might require changing key pointers higher up in the
1323 * tree.
d352ac68 1324 *
d397712b
CM
1325 * callers might also have set path->keep_locks, which tells this code to keep
1326 * the lock if the path points to the last slot in the block. This is part of
1327 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 1328 *
d397712b
CM
1329 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1330 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 1331 */
e02119d5
CM
1332static noinline void unlock_up(struct btrfs_path *path, int level,
1333 int lowest_unlock)
925baedd
CM
1334{
1335 int i;
1336 int skip_level = level;
051e1b9f 1337 int no_skips = 0;
925baedd
CM
1338 struct extent_buffer *t;
1339
1340 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1341 if (!path->nodes[i])
1342 break;
1343 if (!path->locks[i])
1344 break;
051e1b9f 1345 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
1346 skip_level = i + 1;
1347 continue;
1348 }
051e1b9f 1349 if (!no_skips && path->keep_locks) {
925baedd
CM
1350 u32 nritems;
1351 t = path->nodes[i];
1352 nritems = btrfs_header_nritems(t);
051e1b9f 1353 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
1354 skip_level = i + 1;
1355 continue;
1356 }
1357 }
051e1b9f
CM
1358 if (skip_level < i && i >= lowest_unlock)
1359 no_skips = 1;
1360
925baedd
CM
1361 t = path->nodes[i];
1362 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1363 btrfs_tree_unlock(t);
1364 path->locks[i] = 0;
1365 }
1366 }
1367}
1368
b4ce94de
CM
1369/*
1370 * This releases any locks held in the path starting at level and
1371 * going all the way up to the root.
1372 *
1373 * btrfs_search_slot will keep the lock held on higher nodes in a few
1374 * corner cases, such as COW of the block at slot zero in the node. This
1375 * ignores those rules, and it should only be called when there are no
1376 * more updates to be done higher up in the tree.
1377 */
1378noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1379{
1380 int i;
1381
5d4f98a2 1382 if (path->keep_locks)
b4ce94de
CM
1383 return;
1384
1385 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1386 if (!path->nodes[i])
12f4dacc 1387 continue;
b4ce94de 1388 if (!path->locks[i])
12f4dacc 1389 continue;
b4ce94de
CM
1390 btrfs_tree_unlock(path->nodes[i]);
1391 path->locks[i] = 0;
1392 }
1393}
1394
c8c42864
CM
1395/*
1396 * helper function for btrfs_search_slot. The goal is to find a block
1397 * in cache without setting the path to blocking. If we find the block
1398 * we return zero and the path is unchanged.
1399 *
1400 * If we can't find the block, we set the path blocking and do some
1401 * reada. -EAGAIN is returned and the search must be repeated.
1402 */
1403static int
1404read_block_for_search(struct btrfs_trans_handle *trans,
1405 struct btrfs_root *root, struct btrfs_path *p,
1406 struct extent_buffer **eb_ret, int level, int slot,
1407 struct btrfs_key *key)
1408{
1409 u64 blocknr;
1410 u64 gen;
1411 u32 blocksize;
1412 struct extent_buffer *b = *eb_ret;
1413 struct extent_buffer *tmp;
76a05b35 1414 int ret;
c8c42864
CM
1415
1416 blocknr = btrfs_node_blockptr(b, slot);
1417 gen = btrfs_node_ptr_generation(b, slot);
1418 blocksize = btrfs_level_size(root, level - 1);
1419
1420 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a
CM
1421 if (tmp) {
1422 if (btrfs_buffer_uptodate(tmp, 0)) {
1423 if (btrfs_buffer_uptodate(tmp, gen)) {
1424 /*
1425 * we found an up to date block without
1426 * sleeping, return
1427 * right away
1428 */
1429 *eb_ret = tmp;
1430 return 0;
1431 }
1432 /* the pages were up to date, but we failed
1433 * the generation number check. Do a full
1434 * read for the generation number that is correct.
1435 * We must do this without dropping locks so
1436 * we can trust our generation number
1437 */
1438 free_extent_buffer(tmp);
1439 tmp = read_tree_block(root, blocknr, blocksize, gen);
1440 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1441 *eb_ret = tmp;
1442 return 0;
1443 }
1444 free_extent_buffer(tmp);
b3b4aa74 1445 btrfs_release_path(p);
cb44921a
CM
1446 return -EIO;
1447 }
c8c42864
CM
1448 }
1449
1450 /*
1451 * reduce lock contention at high levels
1452 * of the btree by dropping locks before
76a05b35
CM
1453 * we read. Don't release the lock on the current
1454 * level because we need to walk this node to figure
1455 * out which blocks to read.
c8c42864 1456 */
8c594ea8
CM
1457 btrfs_unlock_up_safe(p, level + 1);
1458 btrfs_set_path_blocking(p);
1459
cb44921a 1460 free_extent_buffer(tmp);
c8c42864
CM
1461 if (p->reada)
1462 reada_for_search(root, p, level, slot, key->objectid);
1463
b3b4aa74 1464 btrfs_release_path(p);
76a05b35
CM
1465
1466 ret = -EAGAIN;
5bdd3536 1467 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
1468 if (tmp) {
1469 /*
1470 * If the read above didn't mark this buffer up to date,
1471 * it will never end up being up to date. Set ret to EIO now
1472 * and give up so that our caller doesn't loop forever
1473 * on our EAGAINs.
1474 */
1475 if (!btrfs_buffer_uptodate(tmp, 0))
1476 ret = -EIO;
c8c42864 1477 free_extent_buffer(tmp);
76a05b35
CM
1478 }
1479 return ret;
c8c42864
CM
1480}
1481
1482/*
1483 * helper function for btrfs_search_slot. This does all of the checks
1484 * for node-level blocks and does any balancing required based on
1485 * the ins_len.
1486 *
1487 * If no extra work was required, zero is returned. If we had to
1488 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1489 * start over
1490 */
1491static int
1492setup_nodes_for_search(struct btrfs_trans_handle *trans,
1493 struct btrfs_root *root, struct btrfs_path *p,
1494 struct extent_buffer *b, int level, int ins_len)
1495{
1496 int ret;
1497 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1498 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1499 int sret;
1500
1501 sret = reada_for_balance(root, p, level);
1502 if (sret)
1503 goto again;
1504
1505 btrfs_set_path_blocking(p);
1506 sret = split_node(trans, root, p, level);
1507 btrfs_clear_path_blocking(p, NULL);
1508
1509 BUG_ON(sret > 0);
1510 if (sret) {
1511 ret = sret;
1512 goto done;
1513 }
1514 b = p->nodes[level];
1515 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 1516 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
1517 int sret;
1518
1519 sret = reada_for_balance(root, p, level);
1520 if (sret)
1521 goto again;
1522
1523 btrfs_set_path_blocking(p);
1524 sret = balance_level(trans, root, p, level);
1525 btrfs_clear_path_blocking(p, NULL);
1526
1527 if (sret) {
1528 ret = sret;
1529 goto done;
1530 }
1531 b = p->nodes[level];
1532 if (!b) {
b3b4aa74 1533 btrfs_release_path(p);
c8c42864
CM
1534 goto again;
1535 }
1536 BUG_ON(btrfs_header_nritems(b) == 1);
1537 }
1538 return 0;
1539
1540again:
1541 ret = -EAGAIN;
1542done:
1543 return ret;
1544}
1545
74123bd7
CM
1546/*
1547 * look for key in the tree. path is filled in with nodes along the way
1548 * if key is found, we return zero and you can find the item in the leaf
1549 * level of the path (level 0)
1550 *
1551 * If the key isn't found, the path points to the slot where it should
aa5d6bed
CM
1552 * be inserted, and 1 is returned. If there are other errors during the
1553 * search a negative error number is returned.
97571fd0
CM
1554 *
1555 * if ins_len > 0, nodes and leaves will be split as we walk down the
1556 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1557 * possible)
74123bd7 1558 */
e089f05c
CM
1559int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1560 *root, struct btrfs_key *key, struct btrfs_path *p, int
1561 ins_len, int cow)
be0e5c09 1562{
5f39d397 1563 struct extent_buffer *b;
be0e5c09
CM
1564 int slot;
1565 int ret;
33c66f43 1566 int err;
be0e5c09 1567 int level;
925baedd 1568 int lowest_unlock = 1;
9f3a7427
CM
1569 u8 lowest_level = 0;
1570
6702ed49 1571 lowest_level = p->lowest_level;
323ac95b 1572 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 1573 WARN_ON(p->nodes[0] != NULL);
25179201 1574
925baedd
CM
1575 if (ins_len < 0)
1576 lowest_unlock = 2;
65b51a00 1577
bb803951 1578again:
5d4f98a2
YZ
1579 if (p->search_commit_root) {
1580 b = root->commit_root;
1581 extent_buffer_get(b);
1582 if (!p->skip_locking)
1583 btrfs_tree_lock(b);
1584 } else {
1585 if (p->skip_locking)
1586 b = btrfs_root_node(root);
1587 else
1588 b = btrfs_lock_root_node(root);
1589 }
925baedd 1590
eb60ceac 1591 while (b) {
5f39d397 1592 level = btrfs_header_level(b);
65b51a00
CM
1593
1594 /*
1595 * setup the path here so we can release it under lock
1596 * contention with the cow code
1597 */
1598 p->nodes[level] = b;
1599 if (!p->skip_locking)
1600 p->locks[level] = 1;
1601
02217ed2 1602 if (cow) {
c8c42864
CM
1603 /*
1604 * if we don't really need to cow this block
1605 * then we don't want to set the path blocking,
1606 * so we test it here
1607 */
5d4f98a2 1608 if (!should_cow_block(trans, root, b))
65b51a00 1609 goto cow_done;
5d4f98a2 1610
b4ce94de
CM
1611 btrfs_set_path_blocking(p);
1612
33c66f43
YZ
1613 err = btrfs_cow_block(trans, root, b,
1614 p->nodes[level + 1],
1615 p->slots[level + 1], &b);
1616 if (err) {
33c66f43 1617 ret = err;
65b51a00 1618 goto done;
54aa1f4d 1619 }
02217ed2 1620 }
65b51a00 1621cow_done:
02217ed2 1622 BUG_ON(!cow && ins_len);
65b51a00 1623
eb60ceac 1624 p->nodes[level] = b;
5cd57b2c
CM
1625 if (!p->skip_locking)
1626 p->locks[level] = 1;
65b51a00 1627
4008c04a 1628 btrfs_clear_path_blocking(p, NULL);
b4ce94de
CM
1629
1630 /*
1631 * we have a lock on b and as long as we aren't changing
1632 * the tree, there is no way to for the items in b to change.
1633 * It is safe to drop the lock on our parent before we
1634 * go through the expensive btree search on b.
1635 *
1636 * If cow is true, then we might be changing slot zero,
1637 * which may require changing the parent. So, we can't
1638 * drop the lock until after we know which slot we're
1639 * operating on.
1640 */
1641 if (!cow)
1642 btrfs_unlock_up_safe(p, level + 1);
1643
5f39d397 1644 ret = bin_search(b, key, level, &slot);
b4ce94de 1645
5f39d397 1646 if (level != 0) {
33c66f43
YZ
1647 int dec = 0;
1648 if (ret && slot > 0) {
1649 dec = 1;
be0e5c09 1650 slot -= 1;
33c66f43 1651 }
be0e5c09 1652 p->slots[level] = slot;
33c66f43 1653 err = setup_nodes_for_search(trans, root, p, b, level,
c8c42864 1654 ins_len);
33c66f43 1655 if (err == -EAGAIN)
c8c42864 1656 goto again;
33c66f43
YZ
1657 if (err) {
1658 ret = err;
c8c42864 1659 goto done;
33c66f43 1660 }
c8c42864
CM
1661 b = p->nodes[level];
1662 slot = p->slots[level];
b4ce94de 1663
f9efa9c7
CM
1664 unlock_up(p, level, lowest_unlock);
1665
925baedd 1666 if (level == lowest_level) {
33c66f43
YZ
1667 if (dec)
1668 p->slots[level]++;
5b21f2ed 1669 goto done;
925baedd 1670 }
ca7a79ad 1671
33c66f43 1672 err = read_block_for_search(trans, root, p,
c8c42864 1673 &b, level, slot, key);
33c66f43 1674 if (err == -EAGAIN)
c8c42864 1675 goto again;
33c66f43
YZ
1676 if (err) {
1677 ret = err;
76a05b35 1678 goto done;
33c66f43 1679 }
76a05b35 1680
b4ce94de 1681 if (!p->skip_locking) {
4008c04a 1682 btrfs_clear_path_blocking(p, NULL);
33c66f43 1683 err = btrfs_try_spin_lock(b);
b4ce94de 1684
33c66f43 1685 if (!err) {
b4ce94de
CM
1686 btrfs_set_path_blocking(p);
1687 btrfs_tree_lock(b);
4008c04a 1688 btrfs_clear_path_blocking(p, b);
b4ce94de
CM
1689 }
1690 }
be0e5c09
CM
1691 } else {
1692 p->slots[level] = slot;
87b29b20
YZ
1693 if (ins_len > 0 &&
1694 btrfs_leaf_free_space(root, b) < ins_len) {
b4ce94de 1695 btrfs_set_path_blocking(p);
33c66f43
YZ
1696 err = split_leaf(trans, root, key,
1697 p, ins_len, ret == 0);
4008c04a 1698 btrfs_clear_path_blocking(p, NULL);
b4ce94de 1699
33c66f43
YZ
1700 BUG_ON(err > 0);
1701 if (err) {
1702 ret = err;
65b51a00
CM
1703 goto done;
1704 }
5c680ed6 1705 }
459931ec
CM
1706 if (!p->search_for_split)
1707 unlock_up(p, level, lowest_unlock);
65b51a00 1708 goto done;
be0e5c09
CM
1709 }
1710 }
65b51a00
CM
1711 ret = 1;
1712done:
b4ce94de
CM
1713 /*
1714 * we don't really know what they plan on doing with the path
1715 * from here on, so for now just mark it as blocking
1716 */
b9473439
CM
1717 if (!p->leave_spinning)
1718 btrfs_set_path_blocking(p);
76a05b35 1719 if (ret < 0)
b3b4aa74 1720 btrfs_release_path(p);
65b51a00 1721 return ret;
be0e5c09
CM
1722}
1723
74123bd7
CM
1724/*
1725 * adjust the pointers going up the tree, starting at level
1726 * making sure the right key of each node is points to 'key'.
1727 * This is used after shifting pointers to the left, so it stops
1728 * fixing up pointers when a given leaf/node is not in slot 0 of the
1729 * higher levels
aa5d6bed
CM
1730 *
1731 * If this fails to write a tree block, it returns -1, but continues
1732 * fixing up the blocks in ram so the tree is consistent.
74123bd7 1733 */
5f39d397
CM
1734static int fixup_low_keys(struct btrfs_trans_handle *trans,
1735 struct btrfs_root *root, struct btrfs_path *path,
1736 struct btrfs_disk_key *key, int level)
be0e5c09
CM
1737{
1738 int i;
aa5d6bed 1739 int ret = 0;
5f39d397
CM
1740 struct extent_buffer *t;
1741
234b63a0 1742 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 1743 int tslot = path->slots[i];
eb60ceac 1744 if (!path->nodes[i])
be0e5c09 1745 break;
5f39d397
CM
1746 t = path->nodes[i];
1747 btrfs_set_node_key(t, key, tslot);
d6025579 1748 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
1749 if (tslot != 0)
1750 break;
1751 }
aa5d6bed 1752 return ret;
be0e5c09
CM
1753}
1754
31840ae1
ZY
1755/*
1756 * update item key.
1757 *
1758 * This function isn't completely safe. It's the caller's responsibility
1759 * that the new key won't break the order
1760 */
1761int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1762 struct btrfs_root *root, struct btrfs_path *path,
1763 struct btrfs_key *new_key)
1764{
1765 struct btrfs_disk_key disk_key;
1766 struct extent_buffer *eb;
1767 int slot;
1768
1769 eb = path->nodes[0];
1770 slot = path->slots[0];
1771 if (slot > 0) {
1772 btrfs_item_key(eb, &disk_key, slot - 1);
1773 if (comp_keys(&disk_key, new_key) >= 0)
1774 return -1;
1775 }
1776 if (slot < btrfs_header_nritems(eb) - 1) {
1777 btrfs_item_key(eb, &disk_key, slot + 1);
1778 if (comp_keys(&disk_key, new_key) <= 0)
1779 return -1;
1780 }
1781
1782 btrfs_cpu_key_to_disk(&disk_key, new_key);
1783 btrfs_set_item_key(eb, &disk_key, slot);
1784 btrfs_mark_buffer_dirty(eb);
1785 if (slot == 0)
1786 fixup_low_keys(trans, root, path, &disk_key, 1);
1787 return 0;
1788}
1789
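/*
 * Illustrative sketch, kept out of the build with #if 0: one plausible
 * caller of btrfs_set_item_key_safe() that rewrites the offset of an
 * existing item's key.  The helper name example_set_key_offset is
 * invented for the example; everything it calls is defined or used
 * elsewhere in this file.
 */
#if 0
static int example_set_key_offset(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, u64 new_offset)
{
	struct btrfs_path *path;
	struct btrfs_key new_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* cow the leaf so the key rewrite happens in this transaction */
	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret) {
		/* the item has to exist already */
		ret = ret > 0 ? -ENOENT : ret;
		goto out;
	}

	new_key = *key;
	new_key.offset = new_offset;
	/*
	 * this only rewrites the key in place, it is up to us to pick a
	 * new key that still sorts between the neighbouring items
	 */
	ret = btrfs_set_item_key_safe(trans, root, path, &new_key);
out:
	btrfs_free_path(path);
	return ret;
}
#endif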
74123bd7
CM
1790/*
1791 * try to push data from one node into the next node left in the
79f95c82 1792 * tree.
aa5d6bed
CM
1793 *
1794 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1795 * error, and > 0 if there was no room in the left hand block.
74123bd7 1796 */
98ed5174
CM
1797static int push_node_left(struct btrfs_trans_handle *trans,
1798 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 1799 struct extent_buffer *src, int empty)
be0e5c09 1800{
be0e5c09 1801 int push_items = 0;
bb803951
CM
1802 int src_nritems;
1803 int dst_nritems;
aa5d6bed 1804 int ret = 0;
be0e5c09 1805
5f39d397
CM
1806 src_nritems = btrfs_header_nritems(src);
1807 dst_nritems = btrfs_header_nritems(dst);
123abc88 1808 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
1809 WARN_ON(btrfs_header_generation(src) != trans->transid);
1810 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 1811
bce4eae9 1812 if (!empty && src_nritems <= 8)
971a1f66
CM
1813 return 1;
1814
d397712b 1815 if (push_items <= 0)
be0e5c09
CM
1816 return 1;
1817
bce4eae9 1818 if (empty) {
971a1f66 1819 push_items = min(src_nritems, push_items);
bce4eae9
CM
1820 if (push_items < src_nritems) {
1821 /* leave at least 8 pointers in the node if
1822 * we aren't going to empty it
1823 */
1824 if (src_nritems - push_items < 8) {
1825 if (push_items <= 8)
1826 return 1;
1827 push_items -= 8;
1828 }
1829 }
1830 } else
1831 push_items = min(src_nritems - 8, push_items);
79f95c82 1832
5f39d397
CM
1833 copy_extent_buffer(dst, src,
1834 btrfs_node_key_ptr_offset(dst_nritems),
1835 btrfs_node_key_ptr_offset(0),
d397712b 1836 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 1837
bb803951 1838 if (push_items < src_nritems) {
5f39d397
CM
1839 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1840 btrfs_node_key_ptr_offset(push_items),
1841 (src_nritems - push_items) *
1842 sizeof(struct btrfs_key_ptr));
1843 }
1844 btrfs_set_header_nritems(src, src_nritems - push_items);
1845 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1846 btrfs_mark_buffer_dirty(src);
1847 btrfs_mark_buffer_dirty(dst);
31840ae1 1848
79f95c82
CM
1849 return ret;
1850}
1851
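/*
 * Worked example of the balancing above (numbers are made up): with
 * BTRFS_NODEPTRS_PER_BLOCK(root) == 121, dst_nritems == 100 and
 * src_nritems == 30, the destination has room for 21 pointers.  A
 * non-empty push keeps at least 8 pointers in the source, so
 * push_items becomes min(30 - 8, 21) == 21 and 21 key_ptrs are copied
 * into the left node, leaving 9 behind in src.
 */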
1852/*
1853 * try to push data from one node into the next node right in the
1854 * tree.
1855 *
1856 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1857 * error, and > 0 if there was no room in the right hand block.
1858 *
1859 * this will only push up to 1/2 the contents of the left node over
1860 */
5f39d397
CM
1861static int balance_node_right(struct btrfs_trans_handle *trans,
1862 struct btrfs_root *root,
1863 struct extent_buffer *dst,
1864 struct extent_buffer *src)
79f95c82 1865{
79f95c82
CM
1866 int push_items = 0;
1867 int max_push;
1868 int src_nritems;
1869 int dst_nritems;
1870 int ret = 0;
79f95c82 1871
7bb86316
CM
1872 WARN_ON(btrfs_header_generation(src) != trans->transid);
1873 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1874
5f39d397
CM
1875 src_nritems = btrfs_header_nritems(src);
1876 dst_nritems = btrfs_header_nritems(dst);
123abc88 1877 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 1878 if (push_items <= 0)
79f95c82 1879 return 1;
bce4eae9 1880
d397712b 1881 if (src_nritems < 4)
bce4eae9 1882 return 1;
79f95c82
CM
1883
1884 max_push = src_nritems / 2 + 1;
1885 /* don't try to empty the node */
d397712b 1886 if (max_push >= src_nritems)
79f95c82 1887 return 1;
252c38f0 1888
79f95c82
CM
1889 if (max_push < push_items)
1890 push_items = max_push;
1891
5f39d397
CM
1892 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1893 btrfs_node_key_ptr_offset(0),
1894 (dst_nritems) *
1895 sizeof(struct btrfs_key_ptr));
d6025579 1896
5f39d397
CM
1897 copy_extent_buffer(dst, src,
1898 btrfs_node_key_ptr_offset(0),
1899 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 1900 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 1901
5f39d397
CM
1902 btrfs_set_header_nritems(src, src_nritems - push_items);
1903 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 1904
5f39d397
CM
1905 btrfs_mark_buffer_dirty(src);
1906 btrfs_mark_buffer_dirty(dst);
31840ae1 1907
aa5d6bed 1908 return ret;
be0e5c09
CM
1909}
1910
97571fd0
CM
1911/*
1912 * helper function to insert a new root level in the tree.
1913 * A new node is allocated, and a single item is inserted to
1914 * point to the existing root
aa5d6bed
CM
1915 *
1916 * returns zero on success or < 0 on failure.
97571fd0 1917 */
d397712b 1918static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
1919 struct btrfs_root *root,
1920 struct btrfs_path *path, int level)
5c680ed6 1921{
7bb86316 1922 u64 lower_gen;
5f39d397
CM
1923 struct extent_buffer *lower;
1924 struct extent_buffer *c;
925baedd 1925 struct extent_buffer *old;
5f39d397 1926 struct btrfs_disk_key lower_key;
5c680ed6
CM
1927
1928 BUG_ON(path->nodes[level]);
1929 BUG_ON(path->nodes[level-1] != root->node);
1930
7bb86316
CM
1931 lower = path->nodes[level-1];
1932 if (level == 1)
1933 btrfs_item_key(lower, &lower_key, 0);
1934 else
1935 btrfs_node_key(lower, &lower_key, 0);
1936
31840ae1 1937 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 1938 root->root_key.objectid, &lower_key,
ad3d81ba 1939 level, root->node->start, 0);
5f39d397
CM
1940 if (IS_ERR(c))
1941 return PTR_ERR(c);
925baedd 1942
f0486c68
YZ
1943 root_add_used(root, root->nodesize);
1944
5d4f98a2 1945 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
1946 btrfs_set_header_nritems(c, 1);
1947 btrfs_set_header_level(c, level);
db94535d 1948 btrfs_set_header_bytenr(c, c->start);
5f39d397 1949 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 1950 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 1951 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
1952
1953 write_extent_buffer(c, root->fs_info->fsid,
1954 (unsigned long)btrfs_header_fsid(c),
1955 BTRFS_FSID_SIZE);
e17cade2
CM
1956
1957 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
1958 (unsigned long)btrfs_header_chunk_tree_uuid(c),
1959 BTRFS_UUID_SIZE);
1960
5f39d397 1961 btrfs_set_node_key(c, &lower_key, 0);
db94535d 1962 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 1963 lower_gen = btrfs_header_generation(lower);
31840ae1 1964 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
1965
1966 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 1967
5f39d397 1968 btrfs_mark_buffer_dirty(c);
d5719762 1969
925baedd 1970 old = root->node;
240f62c8 1971 rcu_assign_pointer(root->node, c);
925baedd
CM
1972
1973 /* the super has an extra ref to root->node */
1974 free_extent_buffer(old);
1975
0b86a832 1976 add_root_to_dirty_list(root);
5f39d397
CM
1977 extent_buffer_get(c);
1978 path->nodes[level] = c;
925baedd 1979 path->locks[level] = 1;
5c680ed6
CM
1980 path->slots[level] = 0;
1981 return 0;
1982}
1983
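/*
 * Shape change made by insert_new_root(), illustration only:
 *
 *	before:                after:
 *	  [ old root ]           [ new root: 1 key_ptr ]
 *	   /   |   \                       |
 *	 ...  ...  ...              [ old root ]
 *	                             /   |   \
 *	                           ...  ...  ...
 *
 * the single pointer in the new root carries the first key of the old
 * root and points at the old root's bytenr, so lookups are unchanged.
 */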
74123bd7
CM
1984/*
1985 * worker function to insert a single pointer in a node.
1986 * the node should have enough room for the pointer already
97571fd0 1987 *
74123bd7
CM
1988 * slot and level indicate where you want the key to go, and
1989 * bytenr is the block the key points to.
aa5d6bed
CM
1990 *
1991 * returns zero on success and < 0 on any error
74123bd7 1992 */
e089f05c
CM
1993static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
1994 *root, struct btrfs_path *path, struct btrfs_disk_key
db94535d 1995 *key, u64 bytenr, int slot, int level)
74123bd7 1996{
5f39d397 1997 struct extent_buffer *lower;
74123bd7 1998 int nritems;
5c680ed6
CM
1999
2000 BUG_ON(!path->nodes[level]);
f0486c68 2001 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
2002 lower = path->nodes[level];
2003 nritems = btrfs_header_nritems(lower);
c293498b 2004 BUG_ON(slot > nritems);
123abc88 2005 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
74123bd7
CM
2006 BUG();
2007 if (slot != nritems) {
5f39d397
CM
2008 memmove_extent_buffer(lower,
2009 btrfs_node_key_ptr_offset(slot + 1),
2010 btrfs_node_key_ptr_offset(slot),
d6025579 2011 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 2012 }
5f39d397 2013 btrfs_set_node_key(lower, key, slot);
db94535d 2014 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
2015 WARN_ON(trans->transid == 0);
2016 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
2017 btrfs_set_header_nritems(lower, nritems + 1);
2018 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
2019 return 0;
2020}
2021
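/*
 * Example, illustration only: inserting at slot 2 of a node that holds
 * slots 0..5 first memmoves key_ptrs 2..5 up to slots 3..6, then
 * writes the new key/blockptr pair into slot 2 and bumps nritems to 7.
 */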
97571fd0
CM
2022/*
2023 * split the node at the specified level in path in two.
2024 * The path is corrected to point to the appropriate node after the split
2025 *
2026 * Before splitting this tries to make some room in the node by pushing
2027 * left and right, if either one works, it returns right away.
aa5d6bed
CM
2028 *
2029 * returns 0 on success and < 0 on failure
97571fd0 2030 */
e02119d5
CM
2031static noinline int split_node(struct btrfs_trans_handle *trans,
2032 struct btrfs_root *root,
2033 struct btrfs_path *path, int level)
be0e5c09 2034{
5f39d397
CM
2035 struct extent_buffer *c;
2036 struct extent_buffer *split;
2037 struct btrfs_disk_key disk_key;
be0e5c09 2038 int mid;
5c680ed6 2039 int ret;
aa5d6bed 2040 int wret;
7518a238 2041 u32 c_nritems;
eb60ceac 2042
5f39d397 2043 c = path->nodes[level];
7bb86316 2044 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 2045 if (c == root->node) {
5c680ed6 2046 /* trying to split the root, lets make a new one */
e089f05c 2047 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
2048 if (ret)
2049 return ret;
b3612421 2050 } else {
e66f709b 2051 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
2052 c = path->nodes[level];
2053 if (!ret && btrfs_header_nritems(c) <
c448acf0 2054 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 2055 return 0;
54aa1f4d
CM
2056 if (ret < 0)
2057 return ret;
be0e5c09 2058 }
e66f709b 2059
5f39d397 2060 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
2061 mid = (c_nritems + 1) / 2;
2062 btrfs_node_key(c, &disk_key, mid);
7bb86316 2063
5d4f98a2 2064 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 2065 root->root_key.objectid,
5d4f98a2 2066 &disk_key, level, c->start, 0);
5f39d397
CM
2067 if (IS_ERR(split))
2068 return PTR_ERR(split);
2069
f0486c68
YZ
2070 root_add_used(root, root->nodesize);
2071
5d4f98a2 2072 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 2073 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 2074 btrfs_set_header_bytenr(split, split->start);
5f39d397 2075 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 2076 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2077 btrfs_set_header_owner(split, root->root_key.objectid);
2078 write_extent_buffer(split, root->fs_info->fsid,
2079 (unsigned long)btrfs_header_fsid(split),
2080 BTRFS_FSID_SIZE);
e17cade2
CM
2081 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2082 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2083 BTRFS_UUID_SIZE);
54aa1f4d 2084
5f39d397
CM
2085
2086 copy_extent_buffer(split, c,
2087 btrfs_node_key_ptr_offset(0),
2088 btrfs_node_key_ptr_offset(mid),
2089 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2090 btrfs_set_header_nritems(split, c_nritems - mid);
2091 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
2092 ret = 0;
2093
5f39d397
CM
2094 btrfs_mark_buffer_dirty(c);
2095 btrfs_mark_buffer_dirty(split);
2096
db94535d 2097 wret = insert_ptr(trans, root, path, &disk_key, split->start,
5f39d397 2098 path->slots[level + 1] + 1,
123abc88 2099 level + 1);
aa5d6bed
CM
2100 if (wret)
2101 ret = wret;
2102
5de08d7d 2103 if (path->slots[level] >= mid) {
5c680ed6 2104 path->slots[level] -= mid;
925baedd 2105 btrfs_tree_unlock(c);
5f39d397
CM
2106 free_extent_buffer(c);
2107 path->nodes[level] = split;
5c680ed6
CM
2108 path->slots[level + 1] += 1;
2109 } else {
925baedd 2110 btrfs_tree_unlock(split);
5f39d397 2111 free_extent_buffer(split);
be0e5c09 2112 }
aa5d6bed 2113 return ret;
be0e5c09
CM
2114}
2115
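/*
 * Worked example of the split above (illustration only): a node with
 * c_nritems == 11 gets mid == (11 + 1) / 2 == 6, so pointers 6..10
 * move into the new 'split' block and the original keeps 0..5.  A path
 * slot of 8 becomes slot 2 in the new block and slots[level + 1] is
 * advanced past the pointer that was just inserted for it.
 */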
74123bd7
CM
2116/*
2117 * how many bytes are required to store the items in a leaf. start
2118 * and nr indicate which items in the leaf to check. This totals up the
2119 * space used both by the item structs and the item data
2120 */
5f39d397 2121static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
2122{
2123 int data_len;
5f39d397 2124 int nritems = btrfs_header_nritems(l);
d4dbff95 2125 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
2126
2127 if (!nr)
2128 return 0;
5f39d397
CM
2129 data_len = btrfs_item_end_nr(l, start);
2130 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 2131 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 2132 WARN_ON(data_len < 0);
be0e5c09
CM
2133 return data_len;
2134}
2135
d4dbff95
CM
2136/*
2137 * The space between the end of the leaf items and
2138 * the start of the leaf data. IOW, how much room
2139 * the leaf has left for both items and data
2140 */
d397712b 2141noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 2142 struct extent_buffer *leaf)
d4dbff95 2143{
5f39d397
CM
2144 int nritems = btrfs_header_nritems(leaf);
2145 int ret;
2146 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2147 if (ret < 0) {
d397712b
CM
2148 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2149 "used %d nritems %d\n",
ae2f5411 2150 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
2151 leaf_space_used(leaf, 0, nritems), nritems);
2152 }
2153 return ret;
d4dbff95
CM
2154}
2155
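/*
 * Worked example of the space accounting (illustration only):
 * BTRFS_LEAF_DATA_SIZE(root) is the leaf size minus the header.  Each
 * item consumes sizeof(struct btrfs_item) at the front of that area
 * plus its data at the back, so inserting an item of data_size bytes
 * only fits when
 *
 *	btrfs_leaf_free_space(root, leaf) >=
 *		data_size + sizeof(struct btrfs_item)
 *
 * which is exactly the total_size check the insert helpers below make.
 */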
99d8f83c
CM
2156/*
2157 * min slot controls the lowest index we're willing to push to the
2158 * right. We'll push up to and including min_slot, but no lower
2159 */
44871b1b
CM
2160static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2161 struct btrfs_root *root,
2162 struct btrfs_path *path,
2163 int data_size, int empty,
2164 struct extent_buffer *right,
99d8f83c
CM
2165 int free_space, u32 left_nritems,
2166 u32 min_slot)
00ec4c51 2167{
5f39d397 2168 struct extent_buffer *left = path->nodes[0];
44871b1b 2169 struct extent_buffer *upper = path->nodes[1];
5f39d397 2170 struct btrfs_disk_key disk_key;
00ec4c51 2171 int slot;
34a38218 2172 u32 i;
00ec4c51
CM
2173 int push_space = 0;
2174 int push_items = 0;
0783fcfc 2175 struct btrfs_item *item;
34a38218 2176 u32 nr;
7518a238 2177 u32 right_nritems;
5f39d397 2178 u32 data_end;
db94535d 2179 u32 this_item_size;
00ec4c51 2180
34a38218
CM
2181 if (empty)
2182 nr = 0;
2183 else
99d8f83c 2184 nr = max_t(u32, 1, min_slot);
34a38218 2185
31840ae1 2186 if (path->slots[0] >= left_nritems)
87b29b20 2187 push_space += data_size;
31840ae1 2188
44871b1b 2189 slot = path->slots[1];
34a38218
CM
2190 i = left_nritems - 1;
2191 while (i >= nr) {
5f39d397 2192 item = btrfs_item_nr(left, i);
db94535d 2193
31840ae1
ZY
2194 if (!empty && push_items > 0) {
2195 if (path->slots[0] > i)
2196 break;
2197 if (path->slots[0] == i) {
2198 int space = btrfs_leaf_free_space(root, left);
2199 if (space + push_space * 2 > free_space)
2200 break;
2201 }
2202 }
2203
00ec4c51 2204 if (path->slots[0] == i)
87b29b20 2205 push_space += data_size;
db94535d 2206
db94535d
CM
2207 this_item_size = btrfs_item_size(left, item);
2208 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 2209 break;
31840ae1 2210
00ec4c51 2211 push_items++;
db94535d 2212 push_space += this_item_size + sizeof(*item);
34a38218
CM
2213 if (i == 0)
2214 break;
2215 i--;
db94535d 2216 }
5f39d397 2217
925baedd
CM
2218 if (push_items == 0)
2219 goto out_unlock;
5f39d397 2220
34a38218 2221 if (!empty && push_items == left_nritems)
a429e513 2222 WARN_ON(1);
5f39d397 2223
00ec4c51 2224 /* push left to right */
5f39d397 2225 right_nritems = btrfs_header_nritems(right);
34a38218 2226
5f39d397 2227 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 2228 push_space -= leaf_data_end(root, left);
5f39d397 2229
00ec4c51 2230 /* make room in the right data area */
5f39d397
CM
2231 data_end = leaf_data_end(root, right);
2232 memmove_extent_buffer(right,
2233 btrfs_leaf_data(right) + data_end - push_space,
2234 btrfs_leaf_data(right) + data_end,
2235 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2236
00ec4c51 2237 /* copy from the left data area */
5f39d397 2238 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
2239 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2240 btrfs_leaf_data(left) + leaf_data_end(root, left),
2241 push_space);
5f39d397
CM
2242
2243 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2244 btrfs_item_nr_offset(0),
2245 right_nritems * sizeof(struct btrfs_item));
2246
00ec4c51 2247 /* copy the items from left to right */
5f39d397
CM
2248 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2249 btrfs_item_nr_offset(left_nritems - push_items),
2250 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
2251
2252 /* update the item pointers */
7518a238 2253 right_nritems += push_items;
5f39d397 2254 btrfs_set_header_nritems(right, right_nritems);
123abc88 2255 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 2256 for (i = 0; i < right_nritems; i++) {
5f39d397 2257 item = btrfs_item_nr(right, i);
db94535d
CM
2258 push_space -= btrfs_item_size(right, item);
2259 btrfs_set_item_offset(right, item, push_space);
2260 }
2261
7518a238 2262 left_nritems -= push_items;
5f39d397 2263 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 2264
34a38218
CM
2265 if (left_nritems)
2266 btrfs_mark_buffer_dirty(left);
f0486c68
YZ
2267 else
2268 clean_tree_block(trans, root, left);
2269
5f39d397 2270 btrfs_mark_buffer_dirty(right);
a429e513 2271
5f39d397
CM
2272 btrfs_item_key(right, &disk_key, 0);
2273 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 2274 btrfs_mark_buffer_dirty(upper);
02217ed2 2275
00ec4c51 2276 /* then fixup the leaf pointer in the path */
7518a238
CM
2277 if (path->slots[0] >= left_nritems) {
2278 path->slots[0] -= left_nritems;
925baedd
CM
2279 if (btrfs_header_nritems(path->nodes[0]) == 0)
2280 clean_tree_block(trans, root, path->nodes[0]);
2281 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2282 free_extent_buffer(path->nodes[0]);
2283 path->nodes[0] = right;
00ec4c51
CM
2284 path->slots[1] += 1;
2285 } else {
925baedd 2286 btrfs_tree_unlock(right);
5f39d397 2287 free_extent_buffer(right);
00ec4c51
CM
2288 }
2289 return 0;
925baedd
CM
2290
2291out_unlock:
2292 btrfs_tree_unlock(right);
2293 free_extent_buffer(right);
2294 return 1;
00ec4c51 2295}
925baedd 2296
44871b1b
CM
2297/*
2298 * push some data in the path leaf to the right, trying to free up at
2299 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2300 *
2301 * returns 1 if the push failed because the other node didn't have enough
2302 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
2303 *
2304 * this will push starting from min_slot to the end of the leaf. It won't
2305 * push any slot lower than min_slot
44871b1b
CM
2306 */
2307static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
2308 *root, struct btrfs_path *path,
2309 int min_data_size, int data_size,
2310 int empty, u32 min_slot)
44871b1b
CM
2311{
2312 struct extent_buffer *left = path->nodes[0];
2313 struct extent_buffer *right;
2314 struct extent_buffer *upper;
2315 int slot;
2316 int free_space;
2317 u32 left_nritems;
2318 int ret;
2319
2320 if (!path->nodes[1])
2321 return 1;
2322
2323 slot = path->slots[1];
2324 upper = path->nodes[1];
2325 if (slot >= btrfs_header_nritems(upper) - 1)
2326 return 1;
2327
2328 btrfs_assert_tree_locked(path->nodes[1]);
2329
2330 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
2331 if (right == NULL)
2332 return 1;
2333
44871b1b
CM
2334 btrfs_tree_lock(right);
2335 btrfs_set_lock_blocking(right);
2336
2337 free_space = btrfs_leaf_free_space(root, right);
2338 if (free_space < data_size)
2339 goto out_unlock;
2340
2341 /* cow and double check */
2342 ret = btrfs_cow_block(trans, root, right, upper,
2343 slot + 1, &right);
2344 if (ret)
2345 goto out_unlock;
2346
2347 free_space = btrfs_leaf_free_space(root, right);
2348 if (free_space < data_size)
2349 goto out_unlock;
2350
2351 left_nritems = btrfs_header_nritems(left);
2352 if (left_nritems == 0)
2353 goto out_unlock;
2354
99d8f83c
CM
2355 return __push_leaf_right(trans, root, path, min_data_size, empty,
2356 right, free_space, left_nritems, min_slot);
44871b1b
CM
2357out_unlock:
2358 btrfs_tree_unlock(right);
2359 free_extent_buffer(right);
2360 return 1;
2361}
2362
74123bd7
CM
2363/*
2364 * push some data in the path leaf to the left, trying to free up at
2365 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
2366 *
2367 * max_slot can put a limit on how far into the leaf we'll push items. The
2368 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
2369 * items
74123bd7 2370 */
44871b1b
CM
2371static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2372 struct btrfs_root *root,
2373 struct btrfs_path *path, int data_size,
2374 int empty, struct extent_buffer *left,
99d8f83c
CM
2375 int free_space, u32 right_nritems,
2376 u32 max_slot)
be0e5c09 2377{
5f39d397
CM
2378 struct btrfs_disk_key disk_key;
2379 struct extent_buffer *right = path->nodes[0];
be0e5c09 2380 int i;
be0e5c09
CM
2381 int push_space = 0;
2382 int push_items = 0;
0783fcfc 2383 struct btrfs_item *item;
7518a238 2384 u32 old_left_nritems;
34a38218 2385 u32 nr;
aa5d6bed
CM
2386 int ret = 0;
2387 int wret;
db94535d
CM
2388 u32 this_item_size;
2389 u32 old_left_item_size;
be0e5c09 2390
34a38218 2391 if (empty)
99d8f83c 2392 nr = min(right_nritems, max_slot);
34a38218 2393 else
99d8f83c 2394 nr = min(right_nritems - 1, max_slot);
34a38218
CM
2395
2396 for (i = 0; i < nr; i++) {
5f39d397 2397 item = btrfs_item_nr(right, i);
db94535d 2398
31840ae1
ZY
2399 if (!empty && push_items > 0) {
2400 if (path->slots[0] < i)
2401 break;
2402 if (path->slots[0] == i) {
2403 int space = btrfs_leaf_free_space(root, right);
2404 if (space + push_space * 2 > free_space)
2405 break;
2406 }
2407 }
2408
be0e5c09 2409 if (path->slots[0] == i)
87b29b20 2410 push_space += data_size;
db94535d
CM
2411
2412 this_item_size = btrfs_item_size(right, item);
2413 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 2414 break;
db94535d 2415
be0e5c09 2416 push_items++;
db94535d
CM
2417 push_space += this_item_size + sizeof(*item);
2418 }
2419
be0e5c09 2420 if (push_items == 0) {
925baedd
CM
2421 ret = 1;
2422 goto out;
be0e5c09 2423 }
34a38218 2424 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 2425 WARN_ON(1);
5f39d397 2426
be0e5c09 2427 /* push data from right to left */
5f39d397
CM
2428 copy_extent_buffer(left, right,
2429 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2430 btrfs_item_nr_offset(0),
2431 push_items * sizeof(struct btrfs_item));
2432
123abc88 2433 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 2434 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
2435
2436 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
2437 leaf_data_end(root, left) - push_space,
2438 btrfs_leaf_data(right) +
5f39d397 2439 btrfs_item_offset_nr(right, push_items - 1),
d6025579 2440 push_space);
5f39d397 2441 old_left_nritems = btrfs_header_nritems(left);
87b29b20 2442 BUG_ON(old_left_nritems <= 0);
eb60ceac 2443
db94535d 2444 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 2445 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 2446 u32 ioff;
db94535d 2447
5f39d397 2448 item = btrfs_item_nr(left, i);
db94535d 2449
5f39d397
CM
2450 ioff = btrfs_item_offset(left, item);
2451 btrfs_set_item_offset(left, item,
db94535d 2452 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
be0e5c09 2453 }
5f39d397 2454 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
2455
2456 /* fixup right node */
34a38218 2457 if (push_items > right_nritems) {
d397712b
CM
2458 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2459 right_nritems);
34a38218
CM
2460 WARN_ON(1);
2461 }
2462
2463 if (push_items < right_nritems) {
2464 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2465 leaf_data_end(root, right);
2466 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2467 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2468 btrfs_leaf_data(right) +
2469 leaf_data_end(root, right), push_space);
2470
2471 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
2472 btrfs_item_nr_offset(push_items),
2473 (btrfs_header_nritems(right) - push_items) *
2474 sizeof(struct btrfs_item));
34a38218 2475 }
eef1c494
Y
2476 right_nritems -= push_items;
2477 btrfs_set_header_nritems(right, right_nritems);
123abc88 2478 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
2479 for (i = 0; i < right_nritems; i++) {
2480 item = btrfs_item_nr(right, i);
db94535d 2481
db94535d
CM
2482 push_space = push_space - btrfs_item_size(right, item);
2483 btrfs_set_item_offset(right, item, push_space);
2484 }
eb60ceac 2485
5f39d397 2486 btrfs_mark_buffer_dirty(left);
34a38218
CM
2487 if (right_nritems)
2488 btrfs_mark_buffer_dirty(right);
f0486c68
YZ
2489 else
2490 clean_tree_block(trans, root, right);
098f59c2 2491
5f39d397
CM
2492 btrfs_item_key(right, &disk_key, 0);
2493 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
aa5d6bed
CM
2494 if (wret)
2495 ret = wret;
be0e5c09
CM
2496
2497 /* then fixup the leaf pointer in the path */
2498 if (path->slots[0] < push_items) {
2499 path->slots[0] += old_left_nritems;
925baedd 2500 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2501 free_extent_buffer(path->nodes[0]);
2502 path->nodes[0] = left;
be0e5c09
CM
2503 path->slots[1] -= 1;
2504 } else {
925baedd 2505 btrfs_tree_unlock(left);
5f39d397 2506 free_extent_buffer(left);
be0e5c09
CM
2507 path->slots[0] -= push_items;
2508 }
eb60ceac 2509 BUG_ON(path->slots[0] < 0);
aa5d6bed 2510 return ret;
925baedd
CM
2511out:
2512 btrfs_tree_unlock(left);
2513 free_extent_buffer(left);
2514 return ret;
be0e5c09
CM
2515}
2516
44871b1b
CM
2517/*
2518 * push some data in the path leaf to the left, trying to free up at
2519 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
2520 *
2521 * max_slot can put a limit on how far into the leaf we'll push items. The
2522 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2523 * items
44871b1b
CM
2524 */
2525static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
2526 *root, struct btrfs_path *path, int min_data_size,
2527 int data_size, int empty, u32 max_slot)
44871b1b
CM
2528{
2529 struct extent_buffer *right = path->nodes[0];
2530 struct extent_buffer *left;
2531 int slot;
2532 int free_space;
2533 u32 right_nritems;
2534 int ret = 0;
2535
2536 slot = path->slots[1];
2537 if (slot == 0)
2538 return 1;
2539 if (!path->nodes[1])
2540 return 1;
2541
2542 right_nritems = btrfs_header_nritems(right);
2543 if (right_nritems == 0)
2544 return 1;
2545
2546 btrfs_assert_tree_locked(path->nodes[1]);
2547
2548 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
2549 if (left == NULL)
2550 return 1;
2551
44871b1b
CM
2552 btrfs_tree_lock(left);
2553 btrfs_set_lock_blocking(left);
2554
2555 free_space = btrfs_leaf_free_space(root, left);
2556 if (free_space < data_size) {
2557 ret = 1;
2558 goto out;
2559 }
2560
2561 /* cow and double check */
2562 ret = btrfs_cow_block(trans, root, left,
2563 path->nodes[1], slot - 1, &left);
2564 if (ret) {
2565 /* we hit -ENOSPC, but it isn't fatal here */
2566 ret = 1;
2567 goto out;
2568 }
2569
2570 free_space = btrfs_leaf_free_space(root, left);
2571 if (free_space < data_size) {
2572 ret = 1;
2573 goto out;
2574 }
2575
99d8f83c
CM
2576 return __push_leaf_left(trans, root, path, min_data_size,
2577 empty, left, free_space, right_nritems,
2578 max_slot);
44871b1b
CM
2579out:
2580 btrfs_tree_unlock(left);
2581 free_extent_buffer(left);
2582 return ret;
2583}
2584
2585/*
2586 * split the path's leaf in two, making sure there is at least data_size
2587 * available for the resulting leaf level of the path.
2588 *
2589 * returns 0 if all went well and < 0 on failure.
2590 */
2591static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2592 struct btrfs_root *root,
2593 struct btrfs_path *path,
2594 struct extent_buffer *l,
2595 struct extent_buffer *right,
2596 int slot, int mid, int nritems)
2597{
2598 int data_copy_size;
2599 int rt_data_off;
2600 int i;
2601 int ret = 0;
2602 int wret;
2603 struct btrfs_disk_key disk_key;
2604
2605 nritems = nritems - mid;
2606 btrfs_set_header_nritems(right, nritems);
2607 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2608
2609 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2610 btrfs_item_nr_offset(mid),
2611 nritems * sizeof(struct btrfs_item));
2612
2613 copy_extent_buffer(right, l,
2614 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2615 data_copy_size, btrfs_leaf_data(l) +
2616 leaf_data_end(root, l), data_copy_size);
2617
2618 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2619 btrfs_item_end_nr(l, mid);
2620
2621 for (i = 0; i < nritems; i++) {
2622 struct btrfs_item *item = btrfs_item_nr(right, i);
2623 u32 ioff;
2624
44871b1b
CM
2625 ioff = btrfs_item_offset(right, item);
2626 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2627 }
2628
44871b1b
CM
2629 btrfs_set_header_nritems(l, mid);
2630 ret = 0;
2631 btrfs_item_key(right, &disk_key, 0);
2632 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2633 path->slots[1] + 1, 1);
2634 if (wret)
2635 ret = wret;
2636
2637 btrfs_mark_buffer_dirty(right);
2638 btrfs_mark_buffer_dirty(l);
2639 BUG_ON(path->slots[0] != slot);
2640
44871b1b
CM
2641 if (mid <= slot) {
2642 btrfs_tree_unlock(path->nodes[0]);
2643 free_extent_buffer(path->nodes[0]);
2644 path->nodes[0] = right;
2645 path->slots[0] -= mid;
2646 path->slots[1] += 1;
2647 } else {
2648 btrfs_tree_unlock(right);
2649 free_extent_buffer(right);
2650 }
2651
2652 BUG_ON(path->slots[0] < 0);
2653
2654 return ret;
2655}
2656
99d8f83c
CM
2657/*
2658 * double splits happen when we need to insert a big item in the middle
2659 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2660 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2661 * A B C
2662 *
2663 * We avoid this by trying to push the items on either side of our target
2664 * into the adjacent leaves. If all goes well we can avoid the double split
2665 * completely.
2666 */
2667static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2668 struct btrfs_root *root,
2669 struct btrfs_path *path,
2670 int data_size)
2671{
2672 int ret;
2673 int progress = 0;
2674 int slot;
2675 u32 nritems;
2676
2677 slot = path->slots[0];
2678
2679 /*
2680 * try to push all the items after our slot into the
2681 * right leaf
2682 */
2683 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2684 if (ret < 0)
2685 return ret;
2686
2687 if (ret == 0)
2688 progress++;
2689
2690 nritems = btrfs_header_nritems(path->nodes[0]);
2691 /*
2692 * our goal is to get our slot at the start or end of a leaf. If
2693 * we've done so we're done
2694 */
2695 if (path->slots[0] == 0 || path->slots[0] == nritems)
2696 return 0;
2697
2698 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2699 return 0;
2700
2701 /* try to push all the items before our slot into the next leaf */
2702 slot = path->slots[0];
2703 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
2704 if (ret < 0)
2705 return ret;
2706
2707 if (ret == 0)
2708 progress++;
2709
2710 if (progress)
2711 return 0;
2712 return 1;
2713}
2714
74123bd7
CM
2715/*
2716 * split the path's leaf in two, making sure there is at least data_size
2717 * available for the resulting leaf level of the path.
aa5d6bed
CM
2718 *
2719 * returns 0 if all went well and < 0 on failure.
74123bd7 2720 */
e02119d5
CM
2721static noinline int split_leaf(struct btrfs_trans_handle *trans,
2722 struct btrfs_root *root,
2723 struct btrfs_key *ins_key,
2724 struct btrfs_path *path, int data_size,
2725 int extend)
be0e5c09 2726{
5d4f98a2 2727 struct btrfs_disk_key disk_key;
5f39d397 2728 struct extent_buffer *l;
7518a238 2729 u32 nritems;
eb60ceac
CM
2730 int mid;
2731 int slot;
5f39d397 2732 struct extent_buffer *right;
d4dbff95 2733 int ret = 0;
aa5d6bed 2734 int wret;
5d4f98a2 2735 int split;
cc0c5538 2736 int num_doubles = 0;
99d8f83c 2737 int tried_avoid_double = 0;
aa5d6bed 2738
a5719521
YZ
2739 l = path->nodes[0];
2740 slot = path->slots[0];
2741 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2742 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2743 return -EOVERFLOW;
2744
40689478 2745 /* first try to make some room by pushing left and right */
99d8f83c
CM
2746 if (data_size) {
2747 wret = push_leaf_right(trans, root, path, data_size,
2748 data_size, 0, 0);
d397712b 2749 if (wret < 0)
eaee50e8 2750 return wret;
3685f791 2751 if (wret) {
99d8f83c
CM
2752 wret = push_leaf_left(trans, root, path, data_size,
2753 data_size, 0, (u32)-1);
3685f791
CM
2754 if (wret < 0)
2755 return wret;
2756 }
2757 l = path->nodes[0];
aa5d6bed 2758
3685f791 2759 /* did the pushes work? */
87b29b20 2760 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 2761 return 0;
3326d1b0 2762 }
aa5d6bed 2763
5c680ed6 2764 if (!path->nodes[1]) {
e089f05c 2765 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
2766 if (ret)
2767 return ret;
2768 }
cc0c5538 2769again:
5d4f98a2 2770 split = 1;
cc0c5538 2771 l = path->nodes[0];
eb60ceac 2772 slot = path->slots[0];
5f39d397 2773 nritems = btrfs_header_nritems(l);
d397712b 2774 mid = (nritems + 1) / 2;
54aa1f4d 2775
5d4f98a2
YZ
2776 if (mid <= slot) {
2777 if (nritems == 1 ||
2778 leaf_space_used(l, mid, nritems - mid) + data_size >
2779 BTRFS_LEAF_DATA_SIZE(root)) {
2780 if (slot >= nritems) {
2781 split = 0;
2782 } else {
2783 mid = slot;
2784 if (mid != nritems &&
2785 leaf_space_used(l, mid, nritems - mid) +
2786 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
2787 if (data_size && !tried_avoid_double)
2788 goto push_for_double;
5d4f98a2
YZ
2789 split = 2;
2790 }
2791 }
2792 }
2793 } else {
2794 if (leaf_space_used(l, 0, mid) + data_size >
2795 BTRFS_LEAF_DATA_SIZE(root)) {
2796 if (!extend && data_size && slot == 0) {
2797 split = 0;
2798 } else if ((extend || !data_size) && slot == 0) {
2799 mid = 1;
2800 } else {
2801 mid = slot;
2802 if (mid != nritems &&
2803 leaf_space_used(l, mid, nritems - mid) +
2804 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
2805 if (data_size && !tried_avoid_double)
2806 goto push_for_double;
5d4f98a2
YZ
2807 split = 2;
2808 }
2809 }
2810 }
2811 }
2812
2813 if (split == 0)
2814 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2815 else
2816 btrfs_item_key(l, &disk_key, mid);
2817
2818 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 2819 root->root_key.objectid,
5d4f98a2 2820 &disk_key, 0, l->start, 0);
f0486c68 2821 if (IS_ERR(right))
5f39d397 2822 return PTR_ERR(right);
f0486c68
YZ
2823
2824 root_add_used(root, root->leafsize);
5f39d397
CM
2825
2826 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 2827 btrfs_set_header_bytenr(right, right->start);
5f39d397 2828 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 2829 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2830 btrfs_set_header_owner(right, root->root_key.objectid);
2831 btrfs_set_header_level(right, 0);
2832 write_extent_buffer(right, root->fs_info->fsid,
2833 (unsigned long)btrfs_header_fsid(right),
2834 BTRFS_FSID_SIZE);
e17cade2
CM
2835
2836 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2837 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2838 BTRFS_UUID_SIZE);
44871b1b 2839
5d4f98a2
YZ
2840 if (split == 0) {
2841 if (mid <= slot) {
2842 btrfs_set_header_nritems(right, 0);
2843 wret = insert_ptr(trans, root, path,
2844 &disk_key, right->start,
2845 path->slots[1] + 1, 1);
2846 if (wret)
2847 ret = wret;
925baedd 2848
5d4f98a2
YZ
2849 btrfs_tree_unlock(path->nodes[0]);
2850 free_extent_buffer(path->nodes[0]);
2851 path->nodes[0] = right;
2852 path->slots[0] = 0;
2853 path->slots[1] += 1;
2854 } else {
2855 btrfs_set_header_nritems(right, 0);
2856 wret = insert_ptr(trans, root, path,
2857 &disk_key,
2858 right->start,
2859 path->slots[1], 1);
2860 if (wret)
2861 ret = wret;
2862 btrfs_tree_unlock(path->nodes[0]);
2863 free_extent_buffer(path->nodes[0]);
2864 path->nodes[0] = right;
2865 path->slots[0] = 0;
2866 if (path->slots[1] == 0) {
2867 wret = fixup_low_keys(trans, root,
2868 path, &disk_key, 1);
d4dbff95
CM
2869 if (wret)
2870 ret = wret;
5ee78ac7 2871 }
d4dbff95 2872 }
5d4f98a2
YZ
2873 btrfs_mark_buffer_dirty(right);
2874 return ret;
d4dbff95 2875 }
74123bd7 2876
44871b1b 2877 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1
ZY
2878 BUG_ON(ret);
2879
5d4f98a2 2880 if (split == 2) {
cc0c5538
CM
2881 BUG_ON(num_doubles != 0);
2882 num_doubles++;
2883 goto again;
a429e513 2884 }
44871b1b 2885
be0e5c09 2886 return ret;
99d8f83c
CM
2887
2888push_for_double:
2889 push_for_double_split(trans, root, path, data_size);
2890 tried_avoid_double = 1;
2891 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2892 return 0;
2893 goto again;
be0e5c09
CM
2894}
2895
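/*
 * Summary of the 'split' modes used above (illustration only):
 *   split == 1: normal case, move items from 'mid' on into the new
 *               right leaf and point the path at whichever side holds
 *               the target slot.
 *   split == 0: the target slot sits at one end of the leaf, so the
 *               new leaf starts out empty and the path is pointed at
 *               it for the upcoming insert.
 *   split == 2: even after one split the target leaf would still be
 *               too full, so split once more (at most one extra pass,
 *               enforced by the BUG_ON(num_doubles != 0)).
 */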
ad48fd75
YZ
2896static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
2897 struct btrfs_root *root,
2898 struct btrfs_path *path, int ins_len)
459931ec 2899{
ad48fd75 2900 struct btrfs_key key;
459931ec 2901 struct extent_buffer *leaf;
ad48fd75
YZ
2902 struct btrfs_file_extent_item *fi;
2903 u64 extent_len = 0;
2904 u32 item_size;
2905 int ret;
459931ec
CM
2906
2907 leaf = path->nodes[0];
ad48fd75
YZ
2908 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2909
2910 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
2911 key.type != BTRFS_EXTENT_CSUM_KEY);
2912
2913 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
2914 return 0;
459931ec
CM
2915
2916 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
2917 if (key.type == BTRFS_EXTENT_DATA_KEY) {
2918 fi = btrfs_item_ptr(leaf, path->slots[0],
2919 struct btrfs_file_extent_item);
2920 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2921 }
b3b4aa74 2922 btrfs_release_path(path);
459931ec 2923
459931ec 2924 path->keep_locks = 1;
ad48fd75
YZ
2925 path->search_for_split = 1;
2926 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 2927 path->search_for_split = 0;
ad48fd75
YZ
2928 if (ret < 0)
2929 goto err;
459931ec 2930
ad48fd75
YZ
2931 ret = -EAGAIN;
2932 leaf = path->nodes[0];
459931ec 2933 /* if our item isn't there or got smaller, return now */
ad48fd75
YZ
2934 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
2935 goto err;
2936
109f6aef
CM
2937 /* the leaf has changed, it now has room. return now */
2938 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
2939 goto err;
2940
ad48fd75
YZ
2941 if (key.type == BTRFS_EXTENT_DATA_KEY) {
2942 fi = btrfs_item_ptr(leaf, path->slots[0],
2943 struct btrfs_file_extent_item);
2944 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
2945 goto err;
459931ec
CM
2946 }
2947
b9473439 2948 btrfs_set_path_blocking(path);
ad48fd75 2949 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
2950 if (ret)
2951 goto err;
459931ec 2952
ad48fd75 2953 path->keep_locks = 0;
b9473439 2954 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
2955 return 0;
2956err:
2957 path->keep_locks = 0;
2958 return ret;
2959}
2960
2961static noinline int split_item(struct btrfs_trans_handle *trans,
2962 struct btrfs_root *root,
2963 struct btrfs_path *path,
2964 struct btrfs_key *new_key,
2965 unsigned long split_offset)
2966{
2967 struct extent_buffer *leaf;
2968 struct btrfs_item *item;
2969 struct btrfs_item *new_item;
2970 int slot;
2971 char *buf;
2972 u32 nritems;
2973 u32 item_size;
2974 u32 orig_offset;
2975 struct btrfs_disk_key disk_key;
2976
b9473439
CM
2977 leaf = path->nodes[0];
2978 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
2979
b4ce94de
CM
2980 btrfs_set_path_blocking(path);
2981
459931ec
CM
2982 item = btrfs_item_nr(leaf, path->slots[0]);
2983 orig_offset = btrfs_item_offset(leaf, item);
2984 item_size = btrfs_item_size(leaf, item);
2985
459931ec 2986 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
2987 if (!buf)
2988 return -ENOMEM;
2989
459931ec
CM
2990 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
2991 path->slots[0]), item_size);
459931ec 2992
ad48fd75 2993 slot = path->slots[0] + 1;
459931ec 2994 nritems = btrfs_header_nritems(leaf);
459931ec
CM
2995 if (slot != nritems) {
2996 /* shift the items */
2997 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
2998 btrfs_item_nr_offset(slot),
2999 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
3000 }
3001
3002 btrfs_cpu_key_to_disk(&disk_key, new_key);
3003 btrfs_set_item_key(leaf, &disk_key, slot);
3004
3005 new_item = btrfs_item_nr(leaf, slot);
3006
3007 btrfs_set_item_offset(leaf, new_item, orig_offset);
3008 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3009
3010 btrfs_set_item_offset(leaf, item,
3011 orig_offset + item_size - split_offset);
3012 btrfs_set_item_size(leaf, item, split_offset);
3013
3014 btrfs_set_header_nritems(leaf, nritems + 1);
3015
3016 /* write the data for the start of the original item */
3017 write_extent_buffer(leaf, buf,
3018 btrfs_item_ptr_offset(leaf, path->slots[0]),
3019 split_offset);
3020
3021 /* write the data for the new item */
3022 write_extent_buffer(leaf, buf + split_offset,
3023 btrfs_item_ptr_offset(leaf, slot),
3024 item_size - split_offset);
3025 btrfs_mark_buffer_dirty(leaf);
3026
ad48fd75 3027 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
459931ec 3028 kfree(buf);
ad48fd75
YZ
3029 return 0;
3030}
3031
3032/*
3033 * This function splits a single item into two items,
3034 * giving 'new_key' to the new item and splitting the
3035 * old one at split_offset (from the start of the item).
3036 *
3037 * The path may be released by this operation. After
3038 * the split, the path is pointing to the old item. The
3039 * new item is going to be in the same node as the old one.
3040 *
3041 * Note, the item being split must be small enough to live alone on
3042 * a tree block with room for one extra struct btrfs_item
3043 *
3044 * This allows us to split the item in place, keeping a lock on the
3045 * leaf the entire time.
3046 */
3047int btrfs_split_item(struct btrfs_trans_handle *trans,
3048 struct btrfs_root *root,
3049 struct btrfs_path *path,
3050 struct btrfs_key *new_key,
3051 unsigned long split_offset)
3052{
3053 int ret;
3054 ret = setup_leaf_for_split(trans, root, path,
3055 sizeof(struct btrfs_item));
3056 if (ret)
3057 return ret;
3058
3059 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
3060 return ret;
3061}
3062
ad48fd75
YZ
3063/*
3064 * This function duplicates an item, giving 'new_key' to the new item.
3065 * It guarantees both items live in the same tree leaf and the new item
3066 * is contiguous with the original item.
3067 *
3068 * This allows us to split a file extent in place, keeping a lock on the
3069 * leaf the entire time.
3070 */
3071int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3072 struct btrfs_root *root,
3073 struct btrfs_path *path,
3074 struct btrfs_key *new_key)
3075{
3076 struct extent_buffer *leaf;
3077 int ret;
3078 u32 item_size;
3079
3080 leaf = path->nodes[0];
3081 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3082 ret = setup_leaf_for_split(trans, root, path,
3083 item_size + sizeof(struct btrfs_item));
3084 if (ret)
3085 return ret;
3086
3087 path->slots[0]++;
3088 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3089 item_size, item_size +
3090 sizeof(struct btrfs_item), 1);
3091 BUG_ON(ret);
3092
3093 leaf = path->nodes[0];
3094 memcpy_extent_buffer(leaf,
3095 btrfs_item_ptr_offset(leaf, path->slots[0]),
3096 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3097 item_size);
3098 return 0;
3099}
3100
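/*
 * Sketch of a caller, kept out of the build with #if 0: duplicate the
 * item the path points at under the same objectid/type but a new
 * offset.  The helper name and the new_offset parameter are invented
 * for the example.
 */
#if 0
static int example_duplicate_at_offset(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       u64 new_offset)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset = new_offset;

	/*
	 * on success path->slots[0] was advanced by one, so the path now
	 * points at the freshly inserted copy
	 */
	return btrfs_duplicate_item(trans, root, path, &key);
}
#endif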
d352ac68
CM
3101/*
3102 * make the item pointed to by the path smaller. new_size indicates
3103 * how small to make it, and from_end tells us if we just chop bytes
3104 * off the end of the item or if we shift the item to chop bytes off
3105 * the front.
3106 */
b18c6685
CM
3107int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3108 struct btrfs_root *root,
3109 struct btrfs_path *path,
179e29e4 3110 u32 new_size, int from_end)
b18c6685 3111{
b18c6685 3112 int slot;
5f39d397
CM
3113 struct extent_buffer *leaf;
3114 struct btrfs_item *item;
b18c6685
CM
3115 u32 nritems;
3116 unsigned int data_end;
3117 unsigned int old_data_start;
3118 unsigned int old_size;
3119 unsigned int size_diff;
3120 int i;
3121
5f39d397 3122 leaf = path->nodes[0];
179e29e4
CM
3123 slot = path->slots[0];
3124
3125 old_size = btrfs_item_size_nr(leaf, slot);
3126 if (old_size == new_size)
3127 return 0;
b18c6685 3128
5f39d397 3129 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
3130 data_end = leaf_data_end(root, leaf);
3131
5f39d397 3132 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 3133
b18c6685
CM
3134 size_diff = old_size - new_size;
3135
3136 BUG_ON(slot < 0);
3137 BUG_ON(slot >= nritems);
3138
3139 /*
3140 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3141 */
3142 /* first correct the data pointers */
3143 for (i = slot; i < nritems; i++) {
5f39d397
CM
3144 u32 ioff;
3145 item = btrfs_item_nr(leaf, i);
db94535d 3146
5f39d397
CM
3147 ioff = btrfs_item_offset(leaf, item);
3148 btrfs_set_item_offset(leaf, item, ioff + size_diff);
b18c6685 3149 }
db94535d 3150
b18c6685 3151 /* shift the data */
179e29e4
CM
3152 if (from_end) {
3153 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3154 data_end + size_diff, btrfs_leaf_data(leaf) +
3155 data_end, old_data_start + new_size - data_end);
3156 } else {
3157 struct btrfs_disk_key disk_key;
3158 u64 offset;
3159
3160 btrfs_item_key(leaf, &disk_key, slot);
3161
3162 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3163 unsigned long ptr;
3164 struct btrfs_file_extent_item *fi;
3165
3166 fi = btrfs_item_ptr(leaf, slot,
3167 struct btrfs_file_extent_item);
3168 fi = (struct btrfs_file_extent_item *)(
3169 (unsigned long)fi - size_diff);
3170
3171 if (btrfs_file_extent_type(leaf, fi) ==
3172 BTRFS_FILE_EXTENT_INLINE) {
3173 ptr = btrfs_item_ptr_offset(leaf, slot);
3174 memmove_extent_buffer(leaf, ptr,
d397712b
CM
3175 (unsigned long)fi,
3176 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
3177 disk_bytenr));
3178 }
3179 }
3180
3181 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3182 data_end + size_diff, btrfs_leaf_data(leaf) +
3183 data_end, old_data_start - data_end);
3184
3185 offset = btrfs_disk_key_offset(&disk_key);
3186 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3187 btrfs_set_item_key(leaf, &disk_key, slot);
3188 if (slot == 0)
3189 fixup_low_keys(trans, root, path, &disk_key, 1);
3190 }
5f39d397
CM
3191
3192 item = btrfs_item_nr(leaf, slot);
3193 btrfs_set_item_size(leaf, item, new_size);
3194 btrfs_mark_buffer_dirty(leaf);
b18c6685 3195
5f39d397
CM
3196 if (btrfs_leaf_free_space(root, leaf) < 0) {
3197 btrfs_print_leaf(root, leaf);
b18c6685 3198 BUG();
5f39d397 3199 }
1cd30799 3200 return 0;
b18c6685
CM
3201}
3202
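/*
 * Usage sketch, kept out of the build with #if 0: drop the last
 * 'bytes' bytes of the item the path already points at.  The helper
 * name example_chop_item_tail is invented for the example.
 */
#if 0
static int example_chop_item_tail(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path, u32 bytes)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (bytes >= size)
		return -EINVAL;

	/* from_end == 1 chops from the tail, the item key is untouched */
	return btrfs_truncate_item(trans, root, path, size - bytes, 1);
}
#endif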
d352ac68
CM
3203/*
3204 * make the item pointed to by the path bigger, data_size is how many bytes to add.
3205 */
5f39d397
CM
3206int btrfs_extend_item(struct btrfs_trans_handle *trans,
3207 struct btrfs_root *root, struct btrfs_path *path,
3208 u32 data_size)
6567e837 3209{
6567e837 3210 int slot;
5f39d397
CM
3211 struct extent_buffer *leaf;
3212 struct btrfs_item *item;
6567e837
CM
3213 u32 nritems;
3214 unsigned int data_end;
3215 unsigned int old_data;
3216 unsigned int old_size;
3217 int i;
3218
5f39d397 3219 leaf = path->nodes[0];
6567e837 3220
5f39d397 3221 nritems = btrfs_header_nritems(leaf);
6567e837
CM
3222 data_end = leaf_data_end(root, leaf);
3223
5f39d397
CM
3224 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3225 btrfs_print_leaf(root, leaf);
6567e837 3226 BUG();
5f39d397 3227 }
6567e837 3228 slot = path->slots[0];
5f39d397 3229 old_data = btrfs_item_end_nr(leaf, slot);
6567e837
CM
3230
3231 BUG_ON(slot < 0);
3326d1b0
CM
3232 if (slot >= nritems) {
3233 btrfs_print_leaf(root, leaf);
d397712b
CM
3234 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3235 slot, nritems);
3326d1b0
CM
3236 BUG_ON(1);
3237 }
6567e837
CM
3238
3239 /*
3240 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3241 */
3242 /* first correct the data pointers */
3243 for (i = slot; i < nritems; i++) {
5f39d397
CM
3244 u32 ioff;
3245 item = btrfs_item_nr(leaf, i);
db94535d 3246
5f39d397
CM
3247 ioff = btrfs_item_offset(leaf, item);
3248 btrfs_set_item_offset(leaf, item, ioff - data_size);
6567e837 3249 }
5f39d397 3250
6567e837 3251 /* shift the data */
5f39d397 3252 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
6567e837
CM
3253 data_end - data_size, btrfs_leaf_data(leaf) +
3254 data_end, old_data - data_end);
5f39d397 3255
6567e837 3256 data_end = old_data;
5f39d397
CM
3257 old_size = btrfs_item_size_nr(leaf, slot);
3258 item = btrfs_item_nr(leaf, slot);
3259 btrfs_set_item_size(leaf, item, old_size + data_size);
3260 btrfs_mark_buffer_dirty(leaf);
6567e837 3261
5f39d397
CM
3262 if (btrfs_leaf_free_space(root, leaf) < 0) {
3263 btrfs_print_leaf(root, leaf);
6567e837 3264 BUG();
5f39d397 3265 }
1cd30799 3266 return 0;
6567e837
CM
3267}
3268
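/*
 * Usage sketch, kept out of the build with #if 0: append 'len' bytes
 * of data to the tail of an existing item.  The helper name is
 * invented; asking btrfs_search_slot() for 'len' spare bytes is what
 * lets it split the leaf first if there is no room.
 */
#if 0
static int example_append_to_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  const void *data, u32 len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 old_size;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, len, 1);
	if (ret) {
		/* the item has to exist already */
		ret = ret > 0 ? -ENOENT : ret;
		goto out;
	}

	leaf = path->nodes[0];
	old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = btrfs_extend_item(trans, root, path, len);
	if (ret)
		goto out;

	write_extent_buffer(leaf, data,
			    btrfs_item_ptr_offset(leaf, path->slots[0]) +
			    old_size, len);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
#endif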
f3465ca4
JB
3269/*
3270 * Given a key and some data, insert items into the tree.
3271 * This does all the path init required, making room in the tree if needed.
3272 * Returns the number of keys that were inserted.
3273 */
3274int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3275 struct btrfs_root *root,
3276 struct btrfs_path *path,
3277 struct btrfs_key *cpu_key, u32 *data_size,
3278 int nr)
3279{
3280 struct extent_buffer *leaf;
3281 struct btrfs_item *item;
3282 int ret = 0;
3283 int slot;
f3465ca4
JB
3284 int i;
3285 u32 nritems;
3286 u32 total_data = 0;
3287 u32 total_size = 0;
3288 unsigned int data_end;
3289 struct btrfs_disk_key disk_key;
3290 struct btrfs_key found_key;
3291
87b29b20
YZ
3292 for (i = 0; i < nr; i++) {
3293 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3294 BTRFS_LEAF_DATA_SIZE(root)) {
3295 nr = i;
3296 break;
3297 }
f3465ca4 3298 total_data += data_size[i];
87b29b20
YZ
3299 total_size += data_size[i] + sizeof(struct btrfs_item);
3300 }
3301 BUG_ON(nr == 0);
f3465ca4 3302
f3465ca4
JB
3303 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3304 if (ret == 0)
3305 return -EEXIST;
3306 if (ret < 0)
3307 goto out;
3308
f3465ca4
JB
3309 leaf = path->nodes[0];
3310
3311 nritems = btrfs_header_nritems(leaf);
3312 data_end = leaf_data_end(root, leaf);
3313
3314 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3315 for (i = nr - 1; i >= 0; i--) {
3316 total_data -= data_size[i];
3317 total_size -= data_size[i] + sizeof(struct btrfs_item);
3318 if (total_size < btrfs_leaf_free_space(root, leaf))
3319 break;
3320 }
3321 nr = i;
3322 }
3323
3324 slot = path->slots[0];
3325 BUG_ON(slot < 0);
3326
3327 if (slot != nritems) {
3328 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3329
3330 item = btrfs_item_nr(leaf, slot);
3331 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3332
3333 /* figure out how many keys we can insert in here */
3334 total_data = data_size[0];
3335 for (i = 1; i < nr; i++) {
5d4f98a2 3336 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
f3465ca4
JB
3337 break;
3338 total_data += data_size[i];
3339 }
3340 nr = i;
3341
3342 if (old_data < data_end) {
3343 btrfs_print_leaf(root, leaf);
d397712b 3344 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
f3465ca4
JB
3345 slot, old_data, data_end);
3346 BUG_ON(1);
3347 }
3348 /*
3349 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3350 */
3351 /* first correct the data pointers */
f3465ca4
JB
3352 for (i = slot; i < nritems; i++) {
3353 u32 ioff;
3354
3355 item = btrfs_item_nr(leaf, i);
f3465ca4
JB
3356 ioff = btrfs_item_offset(leaf, item);
3357 btrfs_set_item_offset(leaf, item, ioff - total_data);
3358 }
f3465ca4
JB
3359 /* shift the items */
3360 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3361 btrfs_item_nr_offset(slot),
3362 (nritems - slot) * sizeof(struct btrfs_item));
3363
3364 /* shift the data */
3365 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3366 data_end - total_data, btrfs_leaf_data(leaf) +
3367 data_end, old_data - data_end);
3368 data_end = old_data;
3369 } else {
3370 /*
3371 * this sucks but it has to be done, if we are inserting at
3372 * the end of the leaf only insert 1 of the items, since we
3373 * have no way of knowing what's on the next leaf and we'd have
3374 * to drop our current locks to figure it out
3375 */
3376 nr = 1;
3377 }
3378
3379 /* setup the item for the new data */
3380 for (i = 0; i < nr; i++) {
3381 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3382 btrfs_set_item_key(leaf, &disk_key, slot + i);
3383 item = btrfs_item_nr(leaf, slot + i);
3384 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3385 data_end -= data_size[i];
3386 btrfs_set_item_size(leaf, item, data_size[i]);
3387 }
3388 btrfs_set_header_nritems(leaf, nritems + nr);
3389 btrfs_mark_buffer_dirty(leaf);
3390
3391 ret = 0;
3392 if (slot == 0) {
3393 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3394 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3395 }
3396
3397 if (btrfs_leaf_free_space(root, leaf) < 0) {
3398 btrfs_print_leaf(root, leaf);
3399 BUG();
3400 }
3401out:
3402 if (!ret)
3403 ret = nr;
3404 return ret;
3405}
3406
74123bd7 3407/*
44871b1b
CM
3408 * this is a helper for btrfs_insert_empty_items, the main goal here is
3409 * to save stack depth by doing the bulk of the work in a function
3410 * that doesn't call btrfs_search_slot
74123bd7 3411 */
16cdcec7
MX
3412int setup_items_for_insert(struct btrfs_trans_handle *trans,
3413 struct btrfs_root *root, struct btrfs_path *path,
3414 struct btrfs_key *cpu_key, u32 *data_size,
3415 u32 total_data, u32 total_size, int nr)
be0e5c09 3416{
5f39d397 3417 struct btrfs_item *item;
9c58309d 3418 int i;
7518a238 3419 u32 nritems;
be0e5c09 3420 unsigned int data_end;
e2fa7227 3421 struct btrfs_disk_key disk_key;
44871b1b
CM
3422 int ret;
3423 struct extent_buffer *leaf;
3424 int slot;
e2fa7227 3425
5f39d397 3426 leaf = path->nodes[0];
44871b1b 3427 slot = path->slots[0];
74123bd7 3428
5f39d397 3429 nritems = btrfs_header_nritems(leaf);
123abc88 3430 data_end = leaf_data_end(root, leaf);
eb60ceac 3431
f25956cc 3432 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3326d1b0 3433 btrfs_print_leaf(root, leaf);
d397712b 3434 printk(KERN_CRIT "not enough freespace need %u have %d\n",
9c58309d 3435 total_size, btrfs_leaf_free_space(root, leaf));
be0e5c09 3436 BUG();
d4dbff95 3437 }
5f39d397 3438
be0e5c09 3439 if (slot != nritems) {
5f39d397 3440 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
be0e5c09 3441
5f39d397
CM
3442 if (old_data < data_end) {
3443 btrfs_print_leaf(root, leaf);
d397712b 3444 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
5f39d397
CM
3445 slot, old_data, data_end);
3446 BUG_ON(1);
3447 }
be0e5c09
CM
3448 /*
3449 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3450 */
3451 /* first correct the data pointers */
0783fcfc 3452 for (i = slot; i < nritems; i++) {
5f39d397 3453 u32 ioff;
db94535d 3454
5f39d397
CM
3455 item = btrfs_item_nr(leaf, i);
3456 ioff = btrfs_item_offset(leaf, item);
9c58309d 3457 btrfs_set_item_offset(leaf, item, ioff - total_data);
0783fcfc 3458 }
be0e5c09 3459 /* shift the items */
9c58309d 3460 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
5f39d397 3461 btrfs_item_nr_offset(slot),
d6025579 3462 (nritems - slot) * sizeof(struct btrfs_item));
be0e5c09
CM
3463
3464 /* shift the data */
5f39d397 3465 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
9c58309d 3466 data_end - total_data, btrfs_leaf_data(leaf) +
d6025579 3467 data_end, old_data - data_end);
be0e5c09
CM
3468 data_end = old_data;
3469 }
5f39d397 3470
62e2749e 3471 /* setup the item for the new data */
9c58309d
CM
3472 for (i = 0; i < nr; i++) {
3473 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3474 btrfs_set_item_key(leaf, &disk_key, slot + i);
3475 item = btrfs_item_nr(leaf, slot + i);
3476 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3477 data_end -= data_size[i];
3478 btrfs_set_item_size(leaf, item, data_size[i]);
3479 }
44871b1b 3480
9c58309d 3481 btrfs_set_header_nritems(leaf, nritems + nr);
aa5d6bed
CM
3482
3483 ret = 0;
5a01a2e3
CM
3484 if (slot == 0) {
3485 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
e089f05c 3486 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
5a01a2e3 3487 }
b9473439
CM
3488 btrfs_unlock_up_safe(path, 1);
3489 btrfs_mark_buffer_dirty(leaf);
aa5d6bed 3490
5f39d397
CM
3491 if (btrfs_leaf_free_space(root, leaf) < 0) {
3492 btrfs_print_leaf(root, leaf);
be0e5c09 3493 BUG();
5f39d397 3494 }
44871b1b
CM
3495 return ret;
3496}
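
/*
 * A minimal worked example of the size accounting the helpers above and
 * below agree on; a sketch with made-up payload sizes, not a real btrfs
 * helper.  total_data counts only the payload bytes, while total_size also
 * charges one struct btrfs_item header per inserted item, which is what
 * the leaf really has to have free.
 */
static u32 example_room_needed(void)
{
	u32 data_size[2] = { 16, 32 };	/* hypothetical payload sizes */
	u32 total_data = data_size[0] + data_size[1];

	/* each item also consumes a header at the front of the leaf */
	return total_data + 2 * sizeof(struct btrfs_item);
}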
3497
3498/*
3499 * Given a key and some data, insert items into the tree.
3500 * This does all the path init required, making room in the tree if needed.
3501 */
3502int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3503 struct btrfs_root *root,
3504 struct btrfs_path *path,
3505 struct btrfs_key *cpu_key, u32 *data_size,
3506 int nr)
3507{
44871b1b
CM
3508 int ret = 0;
3509 int slot;
3510 int i;
3511 u32 total_size = 0;
3512 u32 total_data = 0;
3513
3514 for (i = 0; i < nr; i++)
3515 total_data += data_size[i];
3516
3517 total_size = total_data + (nr * sizeof(struct btrfs_item));
3518 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3519 if (ret == 0)
3520 return -EEXIST;
3521 if (ret < 0)
3522 goto out;
3523
44871b1b
CM
3524 slot = path->slots[0];
3525 BUG_ON(slot < 0);
3526
3527 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3528 total_data, total_size, nr);
3529
ed2ff2cb 3530out:
62e2749e
CM
3531 return ret;
3532}
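
/*
 * A minimal usage sketch for btrfs_insert_empty_items(): a hypothetical
 * caller (example_insert_two_items is not a real btrfs function) reserves
 * room for two consecutive items in one shot and then copies the payloads
 * in place.  The keys must already sort next to each other in the tree.
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_key *keys, /* 2 consecutive keys */
				    void *data0, u32 len0,
				    void *data1, u32 len1)
{
	struct btrfs_path *path;
	u32 sizes[2] = { len0, len1 };
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* makes room for both items and leaves the path at the first slot */
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	/* fill the payloads behind the freshly created item headers */
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	write_extent_buffer(path->nodes[0], data0, ptr, len0);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0] + 1);
	write_extent_buffer(path->nodes[0], data1, ptr, len1);
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}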
3533
3534/*
3535 * Given a key and some data, insert an item into the tree.
3536 * This does all the path init required, making room in the tree if needed.
3537 */
e089f05c
CM
3538int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3539 *root, struct btrfs_key *cpu_key, void *data, u32
3540 data_size)
62e2749e
CM
3541{
3542 int ret = 0;
2c90e5d6 3543 struct btrfs_path *path;
5f39d397
CM
3544 struct extent_buffer *leaf;
3545 unsigned long ptr;
62e2749e 3546
2c90e5d6 3547 path = btrfs_alloc_path();
db5b493a
TI
3548 if (!path)
3549 return -ENOMEM;
2c90e5d6 3550 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
62e2749e 3551 if (!ret) {
5f39d397
CM
3552 leaf = path->nodes[0];
3553 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3554 write_extent_buffer(leaf, data, ptr, data_size);
3555 btrfs_mark_buffer_dirty(leaf);
62e2749e 3556 }
2c90e5d6 3557 btrfs_free_path(path);
aa5d6bed 3558 return ret;
be0e5c09
CM
3559}
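
/*
 * A minimal usage sketch for btrfs_insert_item() (example_insert_blob is
 * not a real btrfs function, and the key layout is up to the caller):
 * build the key, hand over a flat payload and its length, and let the
 * helper do the path handling.
 */
static int example_insert_blob(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 objectid, u8 type, u64 offset,
			       void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* returns -EEXIST if an item with this exact key is already there */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}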
3560
74123bd7 3561/*
5de08d7d 3562 * delete the pointer from a given node.
74123bd7 3563 *
d352ac68
CM
3564 * the tree should have been previously balanced so the deletion does not
3565 * empty a node.
74123bd7 3566 */
e089f05c
CM
3567static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3568 struct btrfs_path *path, int level, int slot)
be0e5c09 3569{
5f39d397 3570 struct extent_buffer *parent = path->nodes[level];
7518a238 3571 u32 nritems;
aa5d6bed 3572 int ret = 0;
bb803951 3573 int wret;
be0e5c09 3574
5f39d397 3575 nritems = btrfs_header_nritems(parent);
d397712b 3576 if (slot != nritems - 1) {
5f39d397
CM
3577 memmove_extent_buffer(parent,
3578 btrfs_node_key_ptr_offset(slot),
3579 btrfs_node_key_ptr_offset(slot + 1),
d6025579
CM
3580 sizeof(struct btrfs_key_ptr) *
3581 (nritems - slot - 1));
bb803951 3582 }
7518a238 3583 nritems--;
5f39d397 3584 btrfs_set_header_nritems(parent, nritems);
7518a238 3585 if (nritems == 0 && parent == root->node) {
5f39d397 3586 BUG_ON(btrfs_header_level(root->node) != 1);
bb803951 3587 /* just turn the root into a leaf and break */
5f39d397 3588 btrfs_set_header_level(root->node, 0);
bb803951 3589 } else if (slot == 0) {
5f39d397
CM
3590 struct btrfs_disk_key disk_key;
3591
3592 btrfs_node_key(parent, &disk_key, 0);
3593 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
0f70abe2
CM
3594 if (wret)
3595 ret = wret;
be0e5c09 3596 }
d6025579 3597 btrfs_mark_buffer_dirty(parent);
aa5d6bed 3598 return ret;
be0e5c09
CM
3599}
3600
323ac95b
CM
3601/*
3602 * a helper function to delete the leaf pointed to by path->slots[1] and
5d4f98a2 3603 * path->nodes[1].
323ac95b
CM
3604 *
3605 * This deletes the pointer in path->nodes[1] and frees the leaf
3606 * block extent. Zero is returned if it all worked out, < 0 otherwise.
3607 *
3608 * The path must have already been setup for deleting the leaf, including
3609 * all the proper balancing. path->nodes[1] must be locked.
3610 */
5d4f98a2
YZ
3611static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3612 struct btrfs_root *root,
3613 struct btrfs_path *path,
3614 struct extent_buffer *leaf)
323ac95b
CM
3615{
3616 int ret;
323ac95b 3617
5d4f98a2 3618 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
323ac95b
CM
3619 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3620 if (ret)
3621 return ret;
3622
4d081c41
CM
3623 /*
3624 * btrfs_free_extent is expensive, so we want to make sure we
3625 * aren't holding any locks when we call it
3626 */
3627 btrfs_unlock_up_safe(path, 0);
3628
f0486c68
YZ
3629 root_sub_used(root, leaf->len);
3630
3631 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3632 return 0;
323ac95b 3633}
74123bd7
CM
3634/*
3635 * delete 'nr' items starting at 'slot' in the leaf at the bottom of
3636 * path. If that empties the leaf, remove it from the tree
3637 */
85e21bac
CM
3638int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3639 struct btrfs_path *path, int slot, int nr)
be0e5c09 3640{
5f39d397
CM
3641 struct extent_buffer *leaf;
3642 struct btrfs_item *item;
85e21bac
CM
3643 int last_off;
3644 int dsize = 0;
aa5d6bed
CM
3645 int ret = 0;
3646 int wret;
85e21bac 3647 int i;
7518a238 3648 u32 nritems;
be0e5c09 3649
5f39d397 3650 leaf = path->nodes[0];
85e21bac
CM
3651 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3652
3653 for (i = 0; i < nr; i++)
3654 dsize += btrfs_item_size_nr(leaf, slot + i);
3655
5f39d397 3656 nritems = btrfs_header_nritems(leaf);
be0e5c09 3657
85e21bac 3658 if (slot + nr != nritems) {
123abc88 3659 int data_end = leaf_data_end(root, leaf);
5f39d397
CM
3660
3661 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
d6025579
CM
3662 data_end + dsize,
3663 btrfs_leaf_data(leaf) + data_end,
85e21bac 3664 last_off - data_end);
5f39d397 3665
85e21bac 3666 for (i = slot + nr; i < nritems; i++) {
5f39d397 3667 u32 ioff;
db94535d 3668
5f39d397
CM
3669 item = btrfs_item_nr(leaf, i);
3670 ioff = btrfs_item_offset(leaf, item);
3671 btrfs_set_item_offset(leaf, item, ioff + dsize);
0783fcfc 3672 }
db94535d 3673
5f39d397 3674 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
85e21bac 3675 btrfs_item_nr_offset(slot + nr),
d6025579 3676 sizeof(struct btrfs_item) *
85e21bac 3677 (nritems - slot - nr));
be0e5c09 3678 }
85e21bac
CM
3679 btrfs_set_header_nritems(leaf, nritems - nr);
3680 nritems -= nr;
5f39d397 3681
74123bd7 3682 /* delete the leaf if we've emptied it */
7518a238 3683 if (nritems == 0) {
5f39d397
CM
3684 if (leaf == root->node) {
3685 btrfs_set_header_level(leaf, 0);
9a8dd150 3686 } else {
f0486c68
YZ
3687 btrfs_set_path_blocking(path);
3688 clean_tree_block(trans, root, leaf);
5d4f98a2 3689 ret = btrfs_del_leaf(trans, root, path, leaf);
323ac95b 3690 BUG_ON(ret);
9a8dd150 3691 }
be0e5c09 3692 } else {
7518a238 3693 int used = leaf_space_used(leaf, 0, nritems);
aa5d6bed 3694 if (slot == 0) {
5f39d397
CM
3695 struct btrfs_disk_key disk_key;
3696
3697 btrfs_item_key(leaf, &disk_key, 0);
e089f05c 3698 wret = fixup_low_keys(trans, root, path,
5f39d397 3699 &disk_key, 1);
aa5d6bed
CM
3700 if (wret)
3701 ret = wret;
3702 }
aa5d6bed 3703
74123bd7 3704 /* delete the leaf if it is mostly empty */
d717aa1d 3705 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
be0e5c09
CM
3706 /* push_leaf_left fixes the path.
3707 * make sure the path still points to our leaf
3708 * for possible call to del_ptr below
3709 */
4920c9ac 3710 slot = path->slots[1];
5f39d397
CM
3711 extent_buffer_get(leaf);
3712
b9473439 3713 btrfs_set_path_blocking(path);
99d8f83c
CM
3714 wret = push_leaf_left(trans, root, path, 1, 1,
3715 1, (u32)-1);
54aa1f4d 3716 if (wret < 0 && wret != -ENOSPC)
aa5d6bed 3717 ret = wret;
5f39d397
CM
3718
3719 if (path->nodes[0] == leaf &&
3720 btrfs_header_nritems(leaf)) {
99d8f83c
CM
3721 wret = push_leaf_right(trans, root, path, 1,
3722 1, 1, 0);
54aa1f4d 3723 if (wret < 0 && wret != -ENOSPC)
aa5d6bed
CM
3724 ret = wret;
3725 }
5f39d397
CM
3726
3727 if (btrfs_header_nritems(leaf) == 0) {
323ac95b 3728 path->slots[1] = slot;
5d4f98a2 3729 ret = btrfs_del_leaf(trans, root, path, leaf);
323ac95b 3730 BUG_ON(ret);
5f39d397 3731 free_extent_buffer(leaf);
5de08d7d 3732 } else {
925baedd
CM
3733 /* if we're still in the path, make sure
3734 * we're dirty. Otherwise, one of the
3735 * push_leaf functions must have already
3736 * dirtied this buffer
3737 */
3738 if (path->nodes[0] == leaf)
3739 btrfs_mark_buffer_dirty(leaf);
5f39d397 3740 free_extent_buffer(leaf);
be0e5c09 3741 }
d5719762 3742 } else {
5f39d397 3743 btrfs_mark_buffer_dirty(leaf);
be0e5c09
CM
3744 }
3745 }
aa5d6bed 3746 return ret;
be0e5c09
CM
3747}
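
/*
 * A minimal usage sketch for btrfs_del_items(): a hypothetical caller
 * (example_delete_one_item is not a real btrfs function) removes a single
 * item by exact key.  The ins_len of -1 tells btrfs_search_slot() that a
 * deletion is coming so it can rebalance nodes on the way down; cow is 1
 * because the leaf will be modified.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;		/* the key is not in the tree */
		goto out;
	}
	if (ret < 0)
		goto out;

	/* delete exactly one item at the slot the search left us on */
	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
	btrfs_free_path(path);
	return ret;
}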
3748
7bb86316 3749/*
925baedd 3750 * search the tree again to find a leaf with lesser keys
7bb86316
CM
3751 * returns 0 if it found something or 1 if there are no lesser leaves.
3752 * returns < 0 on io errors.
d352ac68
CM
3753 *
3754 * This may release the path, and so you may lose any locks held at the
3755 * time you call it.
7bb86316
CM
3756 */
3757int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3758{
925baedd
CM
3759 struct btrfs_key key;
3760 struct btrfs_disk_key found_key;
3761 int ret;
7bb86316 3762
925baedd 3763 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
7bb86316 3764
925baedd
CM
3765 if (key.offset > 0)
3766 key.offset--;
3767 else if (key.type > 0)
3768 key.type--;
3769 else if (key.objectid > 0)
3770 key.objectid--;
3771 else
3772 return 1;
7bb86316 3773
b3b4aa74 3774 btrfs_release_path(path);
925baedd
CM
3775 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3776 if (ret < 0)
3777 return ret;
3778 btrfs_item_key(path->nodes[0], &found_key, 0);
3779 ret = comp_keys(&found_key, &key);
3780 if (ret < 0)
3781 return 0;
3782 return 1;
7bb86316
CM
3783}
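
/*
 * A minimal sketch of how btrfs_prev_leaf() is consumed while walking
 * items backwards (example_step_back is not a real btrfs function); it
 * mirrors the slot handling btrfs_previous_item() does further down.
 * Locks held on the path may be lost across the call.
 */
static int example_step_back(struct btrfs_root *root, struct btrfs_path *path)
{
	u32 nritems;
	int ret;

	if (path->slots[0] > 0) {
		path->slots[0]--;
		return 0;
	}

	/* re-searches the tree; returns 1 when there is no lesser leaf */
	ret = btrfs_prev_leaf(root, path);
	if (ret != 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;
	/* the re-search may leave the slot one past the last item */
	if (path->slots[0] == nritems)
		path->slots[0]--;
	return 0;
}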
3784
3f157a2f
CM
3785/*
3786 * A helper function to walk down the tree starting at min_key, looking
3787 * for nodes or leaves that are either in cache or have a minimum
d352ac68 3788 * transaction id. This is used by the btree defrag code and by tree logging.
3f157a2f
CM
3789 *
3790 * This does not cow, but it does stuff the starting key it finds back
3791 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3792 * key and get a writable path.
3793 *
3794 * This does lock as it descends, and path->keep_locks should be set
3795 * to 1 by the caller.
3796 *
3797 * This honors path->lowest_level to prevent descent past a given level
3798 * of the tree.
3799 *
d352ac68
CM
3800 * min_trans indicates the oldest transaction that you are interested
3801 * in walking through. Any nodes or leaves older than min_trans are
3802 * skipped over (without reading them).
3803 *
3f157a2f
CM
3804 * returns zero if something useful was found, < 0 on error and 1 if there
3805 * was nothing in the tree that matched the search criteria.
3806 */
3807int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
e02119d5 3808 struct btrfs_key *max_key,
3f157a2f
CM
3809 struct btrfs_path *path, int cache_only,
3810 u64 min_trans)
3811{
3812 struct extent_buffer *cur;
3813 struct btrfs_key found_key;
3814 int slot;
9652480b 3815 int sret;
3f157a2f
CM
3816 u32 nritems;
3817 int level;
3818 int ret = 1;
3819
934d375b 3820 WARN_ON(!path->keep_locks);
3f157a2f
CM
3821again:
3822 cur = btrfs_lock_root_node(root);
3823 level = btrfs_header_level(cur);
e02119d5 3824 WARN_ON(path->nodes[level]);
3f157a2f
CM
3825 path->nodes[level] = cur;
3826 path->locks[level] = 1;
3827
3828 if (btrfs_header_generation(cur) < min_trans) {
3829 ret = 1;
3830 goto out;
3831 }
d397712b 3832 while (1) {
3f157a2f
CM
3833 nritems = btrfs_header_nritems(cur);
3834 level = btrfs_header_level(cur);
9652480b 3835 sret = bin_search(cur, min_key, level, &slot);
3f157a2f 3836
323ac95b
CM
3837 /* at the lowest level, we're done, setup the path and exit */
3838 if (level == path->lowest_level) {
e02119d5
CM
3839 if (slot >= nritems)
3840 goto find_next_key;
3f157a2f
CM
3841 ret = 0;
3842 path->slots[level] = slot;
3843 btrfs_item_key_to_cpu(cur, &found_key, slot);
3844 goto out;
3845 }
9652480b
Y
3846 if (sret && slot > 0)
3847 slot--;
3f157a2f
CM
3848 /*
3849 * check this node pointer against the cache_only and
3850 * min_trans parameters. If it isn't in cache or is too
3851 * old, skip to the next one.
3852 */
d397712b 3853 while (slot < nritems) {
3f157a2f
CM
3854 u64 blockptr;
3855 u64 gen;
3856 struct extent_buffer *tmp;
e02119d5
CM
3857 struct btrfs_disk_key disk_key;
3858
3f157a2f
CM
3859 blockptr = btrfs_node_blockptr(cur, slot);
3860 gen = btrfs_node_ptr_generation(cur, slot);
3861 if (gen < min_trans) {
3862 slot++;
3863 continue;
3864 }
3865 if (!cache_only)
3866 break;
3867
e02119d5
CM
3868 if (max_key) {
3869 btrfs_node_key(cur, &disk_key, slot);
3870 if (comp_keys(&disk_key, max_key) >= 0) {
3871 ret = 1;
3872 goto out;
3873 }
3874 }
3875
3f157a2f
CM
3876 tmp = btrfs_find_tree_block(root, blockptr,
3877 btrfs_level_size(root, level - 1));
3878
3879 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
3880 free_extent_buffer(tmp);
3881 break;
3882 }
3883 if (tmp)
3884 free_extent_buffer(tmp);
3885 slot++;
3886 }
e02119d5 3887find_next_key:
3f157a2f
CM
3888 /*
3889 * we didn't find a candidate key in this node, so walk forward
3890 * and find another one
3891 */
3892 if (slot >= nritems) {
e02119d5 3893 path->slots[level] = slot;
b4ce94de 3894 btrfs_set_path_blocking(path);
e02119d5 3895 sret = btrfs_find_next_key(root, path, min_key, level,
3f157a2f 3896 cache_only, min_trans);
e02119d5 3897 if (sret == 0) {
b3b4aa74 3898 btrfs_release_path(path);
3f157a2f
CM
3899 goto again;
3900 } else {
3901 goto out;
3902 }
3903 }
3904 /* save our key for returning back */
3905 btrfs_node_key_to_cpu(cur, &found_key, slot);
3906 path->slots[level] = slot;
3907 if (level == path->lowest_level) {
3908 ret = 0;
3909 unlock_up(path, level, 1);
3910 goto out;
3911 }
b4ce94de 3912 btrfs_set_path_blocking(path);
3f157a2f 3913 cur = read_node_slot(root, cur, slot);
97d9a8a4 3914 BUG_ON(!cur);
3f157a2f
CM
3915
3916 btrfs_tree_lock(cur);
b4ce94de 3917
3f157a2f
CM
3918 path->locks[level - 1] = 1;
3919 path->nodes[level - 1] = cur;
3920 unlock_up(path, level, 1);
4008c04a 3921 btrfs_clear_path_blocking(path, NULL);
3f157a2f
CM
3922 }
3923out:
3924 if (ret == 0)
3925 memcpy(min_key, &found_key, sizeof(found_key));
b4ce94de 3926 btrfs_set_path_blocking(path);
3f157a2f
CM
3927 return ret;
3928}
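
/*
 * A minimal usage sketch for btrfs_search_forward() in the defrag/tree-log
 * style described above (example_walk_new_leaves is not a real btrfs
 * function, and the key-advancing logic is simplified): visit every item
 * reachable through nodes newer than min_trans, restarting the search
 * after each hit.
 */
static int example_walk_new_leaves(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* btrfs_search_forward() requires this, see the comment above it */
	path->keep_locks = 1;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, NULL, path,
					   0, min_trans);
		if (ret != 0)
			break;		/* 1: nothing newer left, < 0: error */

		/* min_key now holds the key that was found; process it here */

		/* bump the key so the next search starts past this item */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
		btrfs_release_path(path);
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}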
3929
3930/*
3931 * this is similar to btrfs_next_leaf, but does not try to preserve
3932 * and fixup the path. It looks for and returns the next key in the
3933 * tree based on the current path and the cache_only and min_trans
3934 * parameters.
3935 *
3936 * 0 is returned if another key is found, < 0 if there are any errors
3937 * and 1 is returned if there are no higher keys in the tree
3938 *
3939 * path->keep_locks should be set to 1 on the search made before
3940 * calling this function.
3941 */
e7a84565 3942int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
33c66f43 3943 struct btrfs_key *key, int level,
3f157a2f 3944 int cache_only, u64 min_trans)
e7a84565 3945{
e7a84565
CM
3946 int slot;
3947 struct extent_buffer *c;
3948
934d375b 3949 WARN_ON(!path->keep_locks);
d397712b 3950 while (level < BTRFS_MAX_LEVEL) {
e7a84565
CM
3951 if (!path->nodes[level])
3952 return 1;
3953
3954 slot = path->slots[level] + 1;
3955 c = path->nodes[level];
3f157a2f 3956next:
e7a84565 3957 if (slot >= btrfs_header_nritems(c)) {
33c66f43
YZ
3958 int ret;
3959 int orig_lowest;
3960 struct btrfs_key cur_key;
3961 if (level + 1 >= BTRFS_MAX_LEVEL ||
3962 !path->nodes[level + 1])
e7a84565 3963 return 1;
33c66f43
YZ
3964
3965 if (path->locks[level + 1]) {
3966 level++;
3967 continue;
3968 }
3969
3970 slot = btrfs_header_nritems(c) - 1;
3971 if (level == 0)
3972 btrfs_item_key_to_cpu(c, &cur_key, slot);
3973 else
3974 btrfs_node_key_to_cpu(c, &cur_key, slot);
3975
3976 orig_lowest = path->lowest_level;
b3b4aa74 3977 btrfs_release_path(path);
33c66f43
YZ
3978 path->lowest_level = level;
3979 ret = btrfs_search_slot(NULL, root, &cur_key, path,
3980 0, 0);
3981 path->lowest_level = orig_lowest;
3982 if (ret < 0)
3983 return ret;
3984
3985 c = path->nodes[level];
3986 slot = path->slots[level];
3987 if (ret == 0)
3988 slot++;
3989 goto next;
e7a84565 3990 }
33c66f43 3991
e7a84565
CM
3992 if (level == 0)
3993 btrfs_item_key_to_cpu(c, key, slot);
3f157a2f
CM
3994 else {
3995 u64 blockptr = btrfs_node_blockptr(c, slot);
3996 u64 gen = btrfs_node_ptr_generation(c, slot);
3997
3998 if (cache_only) {
3999 struct extent_buffer *cur;
4000 cur = btrfs_find_tree_block(root, blockptr,
4001 btrfs_level_size(root, level - 1));
4002 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4003 slot++;
4004 if (cur)
4005 free_extent_buffer(cur);
4006 goto next;
4007 }
4008 free_extent_buffer(cur);
4009 }
4010 if (gen < min_trans) {
4011 slot++;
4012 goto next;
4013 }
e7a84565 4014 btrfs_node_key_to_cpu(c, key, slot);
3f157a2f 4015 }
e7a84565
CM
4016 return 0;
4017 }
4018 return 1;
4019}
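
/*
 * A minimal sketch (example_peek_next_key is not a real btrfs function) of
 * how btrfs_find_next_key() is typically used: after a search done with
 * path->keep_locks set, peek at the key that follows the current leaf slot
 * without moving the path.  Level 0 and min_trans 0 mean "the very next
 * item, no generation filtering".
 */
static int example_peek_next_key(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_key *next_key)
{
	/* 0: next_key was filled in, 1: nothing follows the current slot */
	return btrfs_find_next_key(root, path, next_key, 0, 0, 0);
}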
4020
97571fd0 4021/*
925baedd 4022 * search the tree again to find a leaf with greater keys
0f70abe2
CM
4023 * returns 0 if it found something or 1 if there are no greater leaves.
4024 * returns < 0 on io errors.
97571fd0 4025 */
234b63a0 4026int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
d97e63b6
CM
4027{
4028 int slot;
8e73f275 4029 int level;
5f39d397 4030 struct extent_buffer *c;
8e73f275 4031 struct extent_buffer *next;
925baedd
CM
4032 struct btrfs_key key;
4033 u32 nritems;
4034 int ret;
8e73f275
CM
4035 int old_spinning = path->leave_spinning;
4036 int force_blocking = 0;
925baedd
CM
4037
4038 nritems = btrfs_header_nritems(path->nodes[0]);
d397712b 4039 if (nritems == 0)
925baedd 4040 return 1;
925baedd 4041
8e73f275
CM
4042 /*
4043 * we take the blocks in an order that upsets lockdep. Using
4044 * blocking mode is the only way around it.
4045 */
4046#ifdef CONFIG_DEBUG_LOCK_ALLOC
4047 force_blocking = 1;
4048#endif
925baedd 4049
8e73f275
CM
4050 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4051again:
4052 level = 1;
4053 next = NULL;
b3b4aa74 4054 btrfs_release_path(path);
8e73f275 4055
a2135011 4056 path->keep_locks = 1;
8e73f275
CM
4057
4058 if (!force_blocking)
4059 path->leave_spinning = 1;
4060
925baedd
CM
4061 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4062 path->keep_locks = 0;
4063
4064 if (ret < 0)
4065 return ret;
4066
a2135011 4067 nritems = btrfs_header_nritems(path->nodes[0]);
168fd7d2
CM
4068 /*
4069 * by releasing the path above we dropped all our locks. A balance
4070 * could have added more items next to the key that used to be
4071 * at the very end of the block. So, check again here and
4072 * advance the path if there are now more items available.
4073 */
a2135011 4074 if (nritems > 0 && path->slots[0] < nritems - 1) {
e457afec
YZ
4075 if (ret == 0)
4076 path->slots[0]++;
8e73f275 4077 ret = 0;
925baedd
CM
4078 goto done;
4079 }
d97e63b6 4080
d397712b 4081 while (level < BTRFS_MAX_LEVEL) {
8e73f275
CM
4082 if (!path->nodes[level]) {
4083 ret = 1;
4084 goto done;
4085 }
5f39d397 4086
d97e63b6
CM
4087 slot = path->slots[level] + 1;
4088 c = path->nodes[level];
5f39d397 4089 if (slot >= btrfs_header_nritems(c)) {
d97e63b6 4090 level++;
8e73f275
CM
4091 if (level == BTRFS_MAX_LEVEL) {
4092 ret = 1;
4093 goto done;
4094 }
d97e63b6
CM
4095 continue;
4096 }
5f39d397 4097
925baedd
CM
4098 if (next) {
4099 btrfs_tree_unlock(next);
5f39d397 4100 free_extent_buffer(next);
925baedd 4101 }
5f39d397 4102
8e73f275
CM
4103 next = c;
4104 ret = read_block_for_search(NULL, root, path, &next, level,
4105 slot, &key);
4106 if (ret == -EAGAIN)
4107 goto again;
5f39d397 4108
76a05b35 4109 if (ret < 0) {
b3b4aa74 4110 btrfs_release_path(path);
76a05b35
CM
4111 goto done;
4112 }
4113
5cd57b2c 4114 if (!path->skip_locking) {
8e73f275
CM
4115 ret = btrfs_try_spin_lock(next);
4116 if (!ret) {
4117 btrfs_set_path_blocking(path);
4118 btrfs_tree_lock(next);
4119 if (!force_blocking)
4120 btrfs_clear_path_blocking(path, next);
4121 }
4122 if (force_blocking)
4123 btrfs_set_lock_blocking(next);
5cd57b2c 4124 }
d97e63b6
CM
4125 break;
4126 }
4127 path->slots[level] = slot;
d397712b 4128 while (1) {
d97e63b6
CM
4129 level--;
4130 c = path->nodes[level];
925baedd
CM
4131 if (path->locks[level])
4132 btrfs_tree_unlock(c);
8e73f275 4133
5f39d397 4134 free_extent_buffer(c);
d97e63b6
CM
4135 path->nodes[level] = next;
4136 path->slots[level] = 0;
a74a4b97
CM
4137 if (!path->skip_locking)
4138 path->locks[level] = 1;
8e73f275 4139
d97e63b6
CM
4140 if (!level)
4141 break;
b4ce94de 4142
8e73f275
CM
4143 ret = read_block_for_search(NULL, root, path, &next, level,
4144 0, &key);
4145 if (ret == -EAGAIN)
4146 goto again;
4147
76a05b35 4148 if (ret < 0) {
b3b4aa74 4149 btrfs_release_path(path);
76a05b35
CM
4150 goto done;
4151 }
4152
5cd57b2c 4153 if (!path->skip_locking) {
b9447ef8 4154 btrfs_assert_tree_locked(path->nodes[level]);
8e73f275
CM
4155 ret = btrfs_try_spin_lock(next);
4156 if (!ret) {
4157 btrfs_set_path_blocking(path);
4158 btrfs_tree_lock(next);
4159 if (!force_blocking)
4160 btrfs_clear_path_blocking(path, next);
4161 }
4162 if (force_blocking)
4163 btrfs_set_lock_blocking(next);
5cd57b2c 4164 }
d97e63b6 4165 }
8e73f275 4166 ret = 0;
925baedd
CM
4167done:
4168 unlock_up(path, 0, 1);
8e73f275
CM
4169 path->leave_spinning = old_spinning;
4170 if (!old_spinning)
4171 btrfs_set_path_blocking(path);
4172
4173 return ret;
d97e63b6 4174}
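
/*
 * A minimal usage sketch for btrfs_next_leaf() (example_walk_items is not
 * a real btrfs function): the common "walk every item starting at a key"
 * loop, consuming the current leaf slot by slot and hopping to the next
 * leaf when the slot runs off the end.  The per-item processing is left as
 * a comment.
 */
static int example_walk_items(struct btrfs_root *root,
			      struct btrfs_key *start_key)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, start_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0) {
				ret = 0;	/* ran out of leaves */
				break;
			}
			if (ret < 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		/* ... process the item at (leaf, path->slots[0]) here ... */

		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}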
0b86a832 4175
3f157a2f
CM
4176/*
4177 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4178 * searching until it gets past min_objectid or finds an item of 'type'
4179 *
4180 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4181 */
0b86a832
CM
4182int btrfs_previous_item(struct btrfs_root *root,
4183 struct btrfs_path *path, u64 min_objectid,
4184 int type)
4185{
4186 struct btrfs_key found_key;
4187 struct extent_buffer *leaf;
e02119d5 4188 u32 nritems;
0b86a832
CM
4189 int ret;
4190
d397712b 4191 while (1) {
0b86a832 4192 if (path->slots[0] == 0) {
b4ce94de 4193 btrfs_set_path_blocking(path);
0b86a832
CM
4194 ret = btrfs_prev_leaf(root, path);
4195 if (ret != 0)
4196 return ret;
4197 } else {
4198 path->slots[0]--;
4199 }
4200 leaf = path->nodes[0];
e02119d5
CM
4201 nritems = btrfs_header_nritems(leaf);
4202 if (nritems == 0)
4203 return 1;
4204 if (path->slots[0] == nritems)
4205 path->slots[0]--;
4206
0b86a832 4207 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
e02119d5
CM
4208 if (found_key.objectid < min_objectid)
4209 break;
0a4eefbb
YZ
4210 if (found_key.type == type)
4211 return 0;
e02119d5
CM
4212 if (found_key.objectid == min_objectid &&
4213 found_key.type < type)
4214 break;
0b86a832
CM
4215 }
4216 return 1;
4217}
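
/*
 * A minimal usage sketch for btrfs_previous_item()
 * (example_find_last_of_type is not a real btrfs function): search just
 * past the range of interest by using the largest possible offset, then
 * step back to the last item of the wanted type for the given objectid.
 */
static int example_find_last_of_type(struct btrfs_root *root,
				     struct btrfs_path *path,
				     u64 objectid, u8 type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;	/* sorts after every real offset */

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* walk backwards until an item of 'type' or past min_objectid */
	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret != 0)
		return ret;	/* 1: nothing found, < 0: error */

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != objectid)
		return 1;	/* type matched, but for an earlier objectid */

	/* the path now points at the last (objectid, type, *) item */
	return 0;
}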