/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory,
	 *    it will still trigger readahead for other nodes and leaves that
	 *    follow it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};

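/*
 * Illustrative sketch (not part of the original header): when iterating over
 * a large part of a tree, callers typically opt in before searching:
 *
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */
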
/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that the new item (btrfs_search_slot) is extending an
	 * already existing item and ins_len contains only the data size and
	 * not the item header (ie. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};

/*
 * The state of btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code. But the race is very small, and only the
	 * first time the root is added to each transaction. So IN_TRANS_SETUP
	 * is used to tell us when more checks are required.
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we just simply do a tree search
	 *   with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
	 *   don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

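/*
 * The enum values above are bit numbers in btrfs_root::state, meant for the
 * standard atomic bitops, e.g. (illustrative):
 *
 *	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
 *		...
 */
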
/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For detail check comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RB_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-RAM representation of the tree. extent_root is used for all allocations
 * and for the extent tree.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* Updated no matter whether the commit succeeds or not. */
	int log_transid_committed;
	/*
	 * Only updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t inode_lock;
	/* red-black tree that keeps track of in-memory inodes */
	struct rb_root inode_tree;

	/*
	 * Xarray that keeps track of delayed nodes of every inode, protected
	 * by @inode_lock.
	 */
	struct xarray delayed_nodes;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation of the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
	return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
	WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
	WRITE_ONCE(root->last_log_commit, commit_id);
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	u64 disk_offset;
	u64 disk_len;
	u64 data_offset;
	u64 data_len;
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};

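/*
 * Illustrative sketch (not part of the original header), assuming the
 * btrfs_drop_extents(trans, root, inode, args) signature used by the file
 * code: dropping a range without inserting a replacement extent:
 *
 *	struct btrfs_drop_extents_args drop_args = { 0 };
 *
 *	drop_args.start = offset;
 *	drop_args.end = offset + len;
 *	drop_args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
 */
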
struct btrfs_file_private {
	void *filldir_buf;
	u64 last_index;
	struct extent_state *llseek_cached_state;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
	return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
	return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
	return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
	((bytes) >> (fs_info)->sectorsize_bits)

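/*
 * For example (illustrative): with a 4K sector size, sectorsize_bits is 12,
 * so BTRFS_BYTES_TO_BLKS(fs_info, 8192) evaluates to 2.
 */
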
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
		     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
				  const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
				  const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

#endif

int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
			       struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int lowest_level,
			u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct extent_buffer *buf,
			  struct extent_buffer *parent, int parent_slot,
			  struct extent_buffer **cow_ret,
			  u64 search_start, u64 empty_size,
			  enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid);
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);

static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 const struct btrfs_key *key,
				 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  const struct btrfs_key *key,
					  u32 data_size)
{
	struct btrfs_item_batch batch;

	batch.keys = key;
	batch.data_sizes = &data_size;
	batch.total_data_size = data_size;
	batch.nr = 1;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
			   struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
			      struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:	The root node of the tree.
 * @key:	The key we are looking for.
 * @found_key:	Will hold the found item.
 * @path:	Holds the current slot/leaf.
 * @iter_ret:	Contains the value returned from btrfs_search_slot or
 *		btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)		\
	for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);	\
	     (iter_ret) >= 0 &&							\
	     (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
	     (path)->slots[0]++							\
	)

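/*
 * Illustrative sketch (not part of the original header) of a full iteration
 * with btrfs_for_each_slot(); all helpers used are declared above:
 *
 *	struct btrfs_key key = { 0 };
 *	struct btrfs_key found_key;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int iter_ret;
 *	int ret = 0;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *		... process the item at path->slots[0] using found_key ...
 *	}
 *	if (iter_ret < 0)
 *		ret = iter_ret;
 *	btrfs_free_path(path);
 *	return ret;
 */
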
int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
	if (rootid == BTRFS_FS_TREE_OBJECTID ||
	    ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
	     !btrfs_qgroup_level(rootid)))
		return 1;
	return 0;
}

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
	return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)		PagePrivate2(page)
#define SetPageOrdered(page)		SetPagePrivate2(page)
#define ClearPageOrdered(page)		ClearPagePrivate2(page)
#define folio_test_ordered(folio)	folio_test_private_2(folio)
#define folio_set_ordered(folio)	folio_set_private_2(folio)
#define folio_clear_ordered(folio)	folio_clear_private_2(folio)

#endif