btrfs: use struct qstr instead of name and namelen pairs
[linux-block.git] / fs / btrfs / ctree.h
CommitLineData
9888c340 1/* SPDX-License-Identifier: GPL-2.0 */
6cbd5570
CM
2/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
6cbd5570
CM
4 */
5
9888c340
DS
6#ifndef BTRFS_CTREE_H
7#define BTRFS_CTREE_H
eb60ceac 8
810191ff 9#include <linux/mm.h>
174cd4b1 10#include <linux/sched/signal.h>
810191ff 11#include <linux/highmem.h>
e20d96d6 12#include <linux/fs.h>
a2de733c 13#include <linux/rwsem.h>
803b2f54 14#include <linux/semaphore.h>
58176a96 15#include <linux/completion.h>
04160088 16#include <linux/backing-dev.h>
e6dcd2dc 17#include <linux/wait.h>
5a0e3ad6 18#include <linux/slab.h>
1abe9b8a 19#include <trace/events/btrfs.h>
65019df8 20#include <asm/unaligned.h>
3b16a4e3 21#include <linux/pagemap.h>
55e301fd 22#include <linux/btrfs.h>
db671160 23#include <linux/btrfs_tree.h>
21c7e756 24#include <linux/workqueue.h>
f667aef6 25#include <linux/security.h>
ee22184b 26#include <linux/sizes.h>
897a41b1 27#include <linux/dynamic_debug.h>
1e4f4714 28#include <linux/refcount.h>
9678c543 29#include <linux/crc32c.h>
4e4cabec 30#include <linux/iomap.h>
9c7d3a54 31#include "extent-io-tree.h"
d1310b2e 32#include "extent_io.h"
5f39d397 33#include "extent_map.h"
8b712842 34#include "async-thread.h"
d12ffdd1 35#include "block-rsv.h"
2992df73 36#include "locking.h"
c7321b76 37#include "misc.h"
e20d96d6 38
e089f05c 39struct btrfs_trans_handle;
79154b1b 40struct btrfs_transaction;
a22285a6 41struct btrfs_pending_snapshot;
31890da0 42struct btrfs_delayed_ref_root;
8719aaae 43struct btrfs_space_info;
32da5386 44struct btrfs_block_group;
e6dcd2dc 45struct btrfs_ordered_sum;
82fa113f 46struct btrfs_ref;
c3a3b19b 47struct btrfs_bio;
1881fba8 48struct btrfs_ioctl_encoded_io_args;
0e75f005
JB
49struct btrfs_device;
50struct btrfs_fs_devices;
51struct btrfs_balance_control;
52struct btrfs_delayed_root;
53struct reloc_control;
e089f05c 54
7c829b72
AJ
55#define BTRFS_OLDEST_GENERATION 0ULL
56
3954401f 57#define BTRFS_EMPTY_DIR_SIZE 0
f254e52c 58
ee22184b 59#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
e2d84521 60
ee22184b 61#define BTRFS_MAX_EXTENT_SIZE SZ_128M
dcab6a3b 62
0b86a832
CM
/*
 * Byte size of a chunk item with @num_stripes stripes: the fixed header plus
 * the trailing stripe array.  One stripe is already embedded in
 * struct btrfs_chunk, hence the (num_stripes - 1).
 */
static inline unsigned long btrfs_chunk_item_size(int num_stripes)
{
	BUG_ON(num_stripes == 0);
	return sizeof(struct btrfs_chunk) +
		sizeof(struct btrfs_stripe) * (num_stripes - 1);
}
69
38732474
QW
70#define BTRFS_SUPER_INFO_OFFSET SZ_64K
71#define BTRFS_SUPER_INFO_SIZE 4096
4300c58f 72static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
38732474 73
37f85ec3
QW
74/*
75 * The reserved space at the beginning of each device.
76 * It covers the primary super block and leaves space for potential use by other
77 * tools like bootloaders or to lower potential damage of accidental overwrite.
78 */
79#define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
80
ace75066
FM
/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,	/* no readahead */
	READA_BACK,	/* read ahead in decreasing key order */
	READA_FORWARD,	/* read ahead in increasing key order */
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory, it
	 *    will still trigger readahead for other nodes and leaves that follow
	 *    it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
101
/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	/* Readahead policy, one of the READA_* values above */
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that new item (btrfs_search_slot) is extending already
	 * existing item and ins_len contains only the data size and not item
	 * header (ie. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};
d9d88fde 138
e922e087
SB
/* State of a device replace operation (stored in btrfs_fs_info::dev_replace). */
struct btrfs_dev_replace {
	u64 replace_state;	/* see #define above */
	time64_t time_started;	/* seconds since 1-Jan-1970 */
	time64_t time_stopped;	/* seconds since 1-Jan-1970 */
	atomic64_t num_write_errors;
	atomic64_t num_uncorrectable_read_errors;

	/* Progress cursors through the copied device range */
	u64 cursor_left;
	u64 committed_cursor_left;
	u64 cursor_left_last_write_of_item;
	u64 cursor_right;

	u64 cont_reading_from_srcdev_mode;	/* see #define above */

	int is_valid;
	int item_needs_writeback;
	struct btrfs_device *srcdev;
	struct btrfs_device *tgtdev;

	struct mutex lock_finishing_cancel_unmount;
	struct rw_semaphore rwsem;

	struct btrfs_scrub_progress scrub_progress;

	struct percpu_counter bio_counter;
	wait_queue_head_t replace_wait;
};
166
fa9c0d79
CM
/*
 * free clusters are used to claim free space in relatively large chunks,
 * allowing us to do less seeky writes. They are used for all metadata
 * allocations. In ssd_spread mode they are also used for data allocations.
 */
struct btrfs_free_cluster {
	spinlock_t lock;
	spinlock_t refill_lock;
	struct rb_root root;

	/* largest extent in this cluster */
	u64 max_size;

	/* first extent starting offset */
	u64 window_start;

	/* We did a full search and couldn't create a cluster */
	bool fragmented;

	struct btrfs_block_group *block_group;
	/*
	 * when a cluster is allocated from a block group, we put the
	 * cluster onto a list in the block group so that it can
	 * be freed before the block group is freed.
	 */
	struct list_head block_group_list;
};
194
b0643e59
DZ
/* Discard control. */
/*
 * Async discard uses multiple lists to differentiate the discard filter
 * parameters. Index 0 is for completely free block groups where we need to
 * ensure the entire block group is trimmed without being lossy. Indices
 * afterwards represent monotonically decreasing discard filter sizes to
 * prioritize what should be discarded next.
 */
#define BTRFS_NR_DISCARD_LISTS		3
#define BTRFS_DISCARD_INDEX_UNUSED	0
#define BTRFS_DISCARD_INDEX_START	1

struct btrfs_discard_ctl {
	struct workqueue_struct *discard_workers;
	struct delayed_work work;
	spinlock_t lock;
	struct btrfs_block_group *block_group;
	struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
	/* Size and timestamp of the previous discard, used for rate control */
	u64 prev_discard;
	u64 prev_discard_time;
	/* Running totals of discardable work */
	atomic_t discardable_extents;
	atomic64_t discardable_bytes;
	u64 max_discard_size;
	u64 delay_ms;
	u32 iops_limit;
	u32 kbps_limit;
	/* Statistics: bytes discarded via extents/bitmaps, and bytes saved */
	u64 discard_extent_bytes;
	u64 discard_bitmap_bytes;
	atomic64_t discard_bytes_saved;
};
225
c3e1f96c
GR
/*
 * Exclusive operations (device replace, resize, device add/remove, balance)
 * Only one of these may run at a time; the current one is recorded in
 * btrfs_fs_info::exclusive_operation.
 */
enum btrfs_exclusive_operation {
	BTRFS_EXCLOP_NONE,
	BTRFS_EXCLOP_BALANCE_PAUSED,
	BTRFS_EXCLOP_BALANCE,
	BTRFS_EXCLOP_DEV_ADD,
	BTRFS_EXCLOP_DEV_REMOVE,
	BTRFS_EXCLOP_DEV_REPLACE,
	BTRFS_EXCLOP_RESIZE,
	BTRFS_EXCLOP_SWAP_ACTIVATE,
};
239
e55958c8
IA
/* Store data about transaction commits, exported via sysfs. */
struct btrfs_commit_stats {
	/* Total number of commits */
	u64 commit_count;
	/* The maximum commit duration so far in ns */
	u64 max_commit_dur;
	/* The last commit duration in ns */
	u64 last_commit_dur;
	/* The total commit duration in ns */
	u64 total_commit_dur;
};
251
/*
 * In-memory state of a mounted btrfs filesystem, reachable from the VFS
 * super block via super_block::s_fs_info (see btrfs_sb()).
 */
struct btrfs_fs_info {
	u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
	unsigned long flags;
	/* Well-known tree roots */
	struct btrfs_root *tree_root;
	struct btrfs_root *chunk_root;
	struct btrfs_root *dev_root;
	struct btrfs_root *fs_root;
	struct btrfs_root *quota_root;
	struct btrfs_root *uuid_root;
	struct btrfs_root *data_reloc_root;
	struct btrfs_root *block_group_root;

	/* the log root tree is a directory of all the other log roots */
	struct btrfs_root *log_root_tree;

	/* The tree that holds the global roots (csum, extent, etc) */
	rwlock_t global_root_lock;
	struct rb_root global_root_tree;

	spinlock_t fs_roots_radix_lock;
	struct radix_tree_root fs_roots_radix;

	/* block group cache stuff */
	rwlock_t block_group_cache_lock;
	struct rb_root_cached block_group_cache_tree;

	/* keep track of unallocated space */
	atomic64_t free_chunk_space;

	/* Track ranges which are used by log trees blocks/logged data extents */
	struct extent_io_tree excluded_extents;

	/* logical->physical extent mapping */
	struct extent_map_tree mapping_tree;

	/*
	 * block reservation for extent, checksum, root tree and
	 * delayed dir index item
	 */
	struct btrfs_block_rsv global_block_rsv;
	/* block reservation for metadata operations */
	struct btrfs_block_rsv trans_block_rsv;
	/* block reservation for chunk tree */
	struct btrfs_block_rsv chunk_block_rsv;
	/* block reservation for delayed operations */
	struct btrfs_block_rsv delayed_block_rsv;
	/* block reservation for delayed refs */
	struct btrfs_block_rsv delayed_refs_rsv;

	struct btrfs_block_rsv empty_block_rsv;

	u64 generation;
	u64 last_trans_committed;
	/*
	 * Generation of the last transaction used for block group relocation
	 * since the filesystem was last mounted (or 0 if none happened yet).
	 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
	 */
	u64 last_reloc_trans;
	u64 avg_delayed_ref_runtime;

	/*
	 * this is updated to the current trans every time a full commit
	 * is required instead of the faster short fsync log commits
	 */
	u64 last_trans_log_full_commit;
	unsigned long mount_opt;

	unsigned long compress_type:4;
	unsigned int compress_level;
	u32 commit_interval;
	/*
	 * It is a suggestive number, the read side is safe even it gets a
	 * wrong number because we will write out the data into a regular
	 * extent. The write side(mount/remount) is under ->s_umount lock,
	 * so it is also safe.
	 */
	u64 max_inline;

	struct btrfs_transaction *running_transaction;
	wait_queue_head_t transaction_throttle;
	wait_queue_head_t transaction_wait;
	wait_queue_head_t transaction_blocked_wait;
	wait_queue_head_t async_submit_wait;

	/*
	 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
	 * when they are updated.
	 *
	 * Because we do not clear the flags for ever, so we needn't use
	 * the lock on the read side.
	 *
	 * We also needn't use the lock when we mount the fs, because
	 * there is no other task which will update the flag.
	 */
	spinlock_t super_lock;
	struct btrfs_super_block *super_copy;
	struct btrfs_super_block *super_for_commit;
	struct super_block *sb;
	struct inode *btree_inode;
	struct mutex tree_log_mutex;
	struct mutex transaction_kthread_mutex;
	struct mutex cleaner_mutex;
	struct mutex chunk_mutex;

	/*
	 * this is taken to make sure we don't set block groups ro after
	 * the free space cache has been allocated on them
	 */
	struct mutex ro_block_group_mutex;

	/* this is used during read/modify/write to make sure
	 * no two ios are trying to mod the same stripe at the same
	 * time
	 */
	struct btrfs_stripe_hash_table *stripe_hash_table;

	/*
	 * this protects the ordered operations list only while we are
	 * processing all of the entries on it. This way we make
	 * sure the commit code doesn't find the list temporarily empty
	 * because another function happens to be doing non-waiting preflush
	 * before jumping into the main commit.
	 */
	struct mutex ordered_operations_mutex;

	struct rw_semaphore commit_root_sem;

	struct rw_semaphore cleanup_work_sem;

	struct rw_semaphore subvol_sem;

	spinlock_t trans_lock;
	/*
	 * the reloc mutex goes with the trans lock, it is taken
	 * during commit to protect us from the relocation code
	 */
	struct mutex reloc_mutex;

	struct list_head trans_list;
	struct list_head dead_roots;
	struct list_head caching_block_groups;

	spinlock_t delayed_iput_lock;
	struct list_head delayed_iputs;
	atomic_t nr_delayed_iputs;
	wait_queue_head_t delayed_iputs_wait;

	atomic64_t tree_mod_seq;

	/* this protects tree_mod_log and tree_mod_seq_list */
	rwlock_t tree_mod_log_lock;
	struct rb_root tree_mod_log;
	struct list_head tree_mod_seq_list;

	atomic_t async_delalloc_pages;

	/*
	 * this is used to protect the following list -- ordered_roots.
	 */
	spinlock_t ordered_root_lock;

	/*
	 * all fs/file tree roots in which there are data=ordered extents
	 * pending writeback are added into this list.
	 *
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_roots;

	struct mutex delalloc_root_mutex;
	spinlock_t delalloc_root_lock;
	/* all fs/file tree roots that have delalloc inodes. */
	struct list_head delalloc_roots;

	/*
	 * there is a pool of worker threads for checksumming during writes
	 * and a pool for checksumming after reads. This is because readers
	 * can run with FS locks held, and the writers may be waiting for
	 * those locks. We don't want ordering in the pending list to cause
	 * deadlocks, and so the two are serviced separately.
	 *
	 * A third pool does submit_bio to avoid deadlocking with the other
	 * two
	 */
	struct btrfs_workqueue *workers;
	struct btrfs_workqueue *hipri_workers;
	struct btrfs_workqueue *delalloc_workers;
	struct btrfs_workqueue *flush_workers;
	struct workqueue_struct *endio_workers;
	struct workqueue_struct *endio_meta_workers;
	struct workqueue_struct *endio_raid56_workers;
	struct workqueue_struct *rmw_workers;
	struct workqueue_struct *compressed_write_workers;
	struct btrfs_workqueue *endio_write_workers;
	struct btrfs_workqueue *endio_freespace_worker;
	struct btrfs_workqueue *caching_workers;

	/*
	 * fixup workers take dirty pages that didn't properly go through
	 * the cow mechanism and make them safe to write. It happens
	 * for the sys_munmap function call path
	 */
	struct btrfs_workqueue *fixup_workers;
	struct btrfs_workqueue *delayed_workers;

	struct task_struct *transaction_kthread;
	struct task_struct *cleaner_kthread;
	u32 thread_pool_size;

	struct kobject *space_info_kobj;
	struct kobject *qgroups_kobj;
	struct kobject *discard_kobj;

	/* used to keep from writing metadata until there is a nice batch */
	struct percpu_counter dirty_metadata_bytes;
	struct percpu_counter delalloc_bytes;
	struct percpu_counter ordered_bytes;
	s32 dirty_metadata_batch;
	s32 delalloc_batch;

	struct list_head dirty_cowonly_roots;

	struct btrfs_fs_devices *fs_devices;

	/*
	 * The space_info list is effectively read only after initial
	 * setup. It is populated at mount time and cleaned up after
	 * all block groups are removed. RCU is used to protect it.
	 */
	struct list_head space_info;

	struct btrfs_space_info *data_sinfo;

	struct reloc_control *reloc_ctl;

	/* data_alloc_cluster is only used in ssd_spread mode */
	struct btrfs_free_cluster data_alloc_cluster;

	/* all metadata allocations go through this cluster */
	struct btrfs_free_cluster meta_alloc_cluster;

	/* auto defrag inodes go here */
	spinlock_t defrag_inodes_lock;
	struct rb_root defrag_inodes;
	atomic_t defrag_running;

	/* Used to protect avail_{data, metadata, system}_alloc_bits */
	seqlock_t profiles_lock;
	/*
	 * these three are in extended format (availability of single
	 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
	 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
	 */
	u64 avail_data_alloc_bits;
	u64 avail_metadata_alloc_bits;
	u64 avail_system_alloc_bits;

	/* restriper state */
	spinlock_t balance_lock;
	struct mutex balance_mutex;
	atomic_t balance_pause_req;
	atomic_t balance_cancel_req;
	struct btrfs_balance_control *balance_ctl;
	wait_queue_head_t balance_wait_q;

	/* Cancellation requests for chunk relocation */
	atomic_t reloc_cancel_req;

	u32 data_chunk_allocations;
	u32 metadata_ratio;

	void *bdev_holder;

	/* private scrub information */
	struct mutex scrub_lock;
	atomic_t scrubs_running;
	atomic_t scrub_pause_req;
	atomic_t scrubs_paused;
	atomic_t scrub_cancel_req;
	wait_queue_head_t scrub_pause_wait;
	/*
	 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
	 * running.
	 */
	refcount_t scrub_workers_refcnt;
	struct workqueue_struct *scrub_workers;
	struct workqueue_struct *scrub_wr_completion_workers;
	struct workqueue_struct *scrub_parity_workers;
	struct btrfs_subpage_info *subpage_info;

	struct btrfs_discard_ctl discard_ctl;

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	u32 check_integrity_print_mask;
#endif
	/* is qgroup tracking in a consistent state? */
	u64 qgroup_flags;

	/* holds configuration and tracking. Protected by qgroup_lock */
	struct rb_root qgroup_tree;
	spinlock_t qgroup_lock;

	/*
	 * used to avoid frequently calling ulist_alloc()/ulist_free()
	 * when doing qgroup accounting, it must be protected by qgroup_lock.
	 */
	struct ulist *qgroup_ulist;

	/*
	 * Protect user change for quota operations. If a transaction is needed,
	 * it must be started before locking this lock.
	 */
	struct mutex qgroup_ioctl_lock;

	/* list of dirty qgroups to be written at next commit */
	struct list_head dirty_qgroups;

	/* used by qgroup for an efficient tree traversal */
	u64 qgroup_seq;

	/* qgroup rescan items */
	struct mutex qgroup_rescan_lock; /* protects the progress item */
	struct btrfs_key qgroup_rescan_progress;
	struct btrfs_workqueue *qgroup_rescan_workers;
	struct completion qgroup_rescan_completion;
	struct btrfs_work qgroup_rescan_work;
	bool qgroup_rescan_running;	/* protected by qgroup_rescan_lock */
	u8 qgroup_drop_subtree_thres;

	/* filesystem state */
	unsigned long fs_state;

	struct btrfs_delayed_root *delayed_root;

	/* Extent buffer radix tree */
	spinlock_t buffer_lock;
	/* Entries are eb->start / sectorsize */
	struct radix_tree_root buffer_radix;

	/* next backup root to be overwritten */
	int backup_root_index;

	/* device replace state */
	struct btrfs_dev_replace dev_replace;

	struct semaphore uuid_tree_rescan_sem;

	/* Used to reclaim the metadata space in the background. */
	struct work_struct async_reclaim_work;
	struct work_struct async_data_reclaim_work;
	struct work_struct preempt_reclaim_work;

	/* Reclaim partially filled block groups in the background */
	struct work_struct reclaim_bgs_work;
	struct list_head reclaim_bgs;
	int bg_reclaim_threshold;

	spinlock_t unused_bgs_lock;
	struct list_head unused_bgs;
	struct mutex unused_bg_unpin_mutex;
	/* Protect block groups that are going to be deleted */
	struct mutex reclaim_bgs_lock;

	/* Cached block sizes */
	u32 nodesize;
	u32 sectorsize;
	/* ilog2 of sectorsize, use to avoid 64bit division */
	u32 sectorsize_bits;
	u32 csum_size;
	u32 csums_per_leaf;
	u32 stripesize;

	/*
	 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
	 * filesystem, on zoned it depends on the device constraints.
	 */
	u64 max_extent_size;

	/* Block groups and devices containing active swapfiles. */
	spinlock_t swapfile_pins_lock;
	struct rb_root swapfile_pins;

	struct crypto_shash *csum_shash;

	/* Type of exclusive operation running, protected by super_lock */
	enum btrfs_exclusive_operation exclusive_operation;

	/*
	 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
	 * if the mode is enabled
	 */
	u64 zone_size;

	/* Max size to emit ZONE_APPEND write command */
	u64 max_zone_append_size;
	struct mutex zoned_meta_io_lock;
	spinlock_t treelog_bg_lock;
	u64 treelog_bg;

	/*
	 * Start of the dedicated data relocation block group, protected by
	 * relocation_bg_lock.
	 */
	spinlock_t relocation_bg_lock;
	u64 data_reloc_bg;
	struct mutex zoned_data_reloc_io_lock;

	u64 nr_global_roots;

	spinlock_t zone_active_bgs_lock;
	struct list_head zone_active_bgs;

	/* Updates are not protected by any lock */
	struct btrfs_commit_stats commit_stats;

	/*
	 * Last generation where we dropped a non-relocation root.
	 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
	 * to change it and to read it, respectively.
	 */
	u64 last_root_drop_gen;

	/*
	 * Annotations for transaction events (structures are empty when
	 * compiled without lockdep).
	 */
	struct lockdep_map btrfs_trans_num_writers_map;
	struct lockdep_map btrfs_trans_num_extwriters_map;
	struct lockdep_map btrfs_state_change_map[4];
	struct lockdep_map btrfs_trans_pending_ordered_map;
	struct lockdep_map btrfs_ordered_extent_map;

#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	spinlock_t ref_verify_lock;
	struct rb_root block_tree;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct kobject *debug_kobj;
	struct list_head allocated_roots;

	spinlock_t eb_leak_lock;
	struct list_head allocated_ebs;
#endif
};
0b86a832 699
12a824dc
FM
/*
 * Record the generation in which a non-relocation root was last dropped.
 * WRITE_ONCE pairs with the READ_ONCE in btrfs_get_last_root_drop_gen() for
 * lockless access to fs_info->last_root_drop_gen.
 */
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
						u64 gen)
{
	WRITE_ONCE(fs_info->last_root_drop_gen, gen);
}
705
/*
 * Lockless read of fs_info->last_root_drop_gen; pairs with the WRITE_ONCE in
 * btrfs_set_last_root_drop_gen().
 */
static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
{
	return READ_ONCE(fs_info->last_root_drop_gen);
}
710
da17066c
JM
/* Return the btrfs_fs_info stored in the VFS super block's s_fs_info. */
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
	return sb->s_fs_info;
}
715
d9d88fde
JB
/*
 * Take the number of bytes to be checksummed and figure out how many leaves
 * it would require to store the csums for that many bytes.
 * One csum covers one sector (sectorsize bytes); the per-leaf capacity is the
 * precomputed fs_info->csums_per_leaf, and the result rounds up.
 */
static inline u64 btrfs_csum_bytes_to_leaves(
			const struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;

	return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
}
727
/*
 * Use this if we would be adding new items, as we could split nodes as we cow
 * down the tree.  The factor of 2 accounts for possible splits at every level.
 */
static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
						  unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
737
/*
 * Doing a truncate or a modification won't result in new nodes or leaves, just
 * what we need for COW: one node per level of the tree per item.
 */
static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
					   unsigned num_items)
{
	return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
747
748#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
749 sizeof(struct btrfs_item))
750
/* True when the filesystem runs in ZONED mode (zone_size is non-zero only then). */
static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
{
	return fs_info->zone_size > 0;
}
755
/*
 * Count how many fs_info->max_extent_size cover the @size
 */
static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	/* Self tests may pass a NULL fs_info; fall back to the default limit */
	if (!fs_info)
		return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
#endif

	return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
}
768
769bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
770 enum btrfs_exclusive_operation type);
771bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
772 enum btrfs_exclusive_operation type);
773void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
774void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
775void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
776 enum btrfs_exclusive_operation op);
777
27cdeb70
MX
/*
 * The state of btrfs root (bit numbers for btrfs_root::state)
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code. But the race is very small, and only the
	 * first time the root is added to each transaction. So IN_TRANS_SETUP
	 * is used to tell us when more checks are required
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we just simply do a tree search
	 *   with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
	 *   don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
27cdeb70 839
3e738c53
IA
/* Indices into btrfs_fs_info::btrfs_state_change_map for lockdep annotations. */
enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_START,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};
846
ab9a323f
IA
/*
 * Lockdep annotation for wait events.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)					\
	do {									\
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);		\
		rwsem_release(&owner->lock##_map, _THIS_IP_);			\
	} while (0)

/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner:  The struct where the lockdep map is defined
 * @lock:   The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock)					\
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)					\
	rwsem_release(&owner->lock##_map, _THIS_IP_)
884
3e738c53
IA
/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros above. The index @i is one of enum btrfs_lockdep_trans_states.
 */
#define btrfs_might_wait_for_state(owner, i) \
	do { \
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \
	} while (0)

/* Reader-side annotation: taken by the thread that will signal state @i. */
#define btrfs_trans_state_lockdep_acquire(owner, i) \
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

/* Drop the reader-side annotation after signaling state @i. */
#define btrfs_trans_state_lockdep_release(owner, i) \
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)
900
ab9a323f
IA
/*
 * Initialization of the lockdep map. The static lock_class_key gives every
 * distinct wait event its own lockdep class.
 */
#define btrfs_lockdep_init_map(owner, lock) \
	do { \
		static struct lock_class_key lock##_key; \
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state) \
	do { \
		static struct lock_class_key lock##_key; \
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
				 &lock##_key, 0); \
	} while (0)
915
370a11b8
QW
/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For detail check comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	/* Protects @swapped and @blocks. */
	spinlock_t lock;
	/* Whether any of the blocks[] rbtrees is non-empty (RB_EMPTY_ROOT()). */
	bool swapped;
	/* One rbtree of swapped block pairs per tree level. */
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};
926
9f5fae2f
CM
/*
 * In-memory representation of a btrfs tree (subvolume root, log root, or one
 * of the internal trees such as the extent tree).
 */
struct btrfs_root {
	/* Node linking this root into an rbtree kept by fs_info. */
	struct rb_node rb_node;

	/* Current root tree block of this tree. */
	struct extent_buffer *node;

	/* Root node as of the last committed transaction. */
	struct extent_buffer *commit_root;
	/* Log tree attached to this subvolume root (NULL when no log). */
	struct btrfs_root *log_root;
	/* Relocation shadow root, set while balance/relocation runs. */
	struct btrfs_root *reloc_root;

	/* BTRFS_ROOT_* state bits (see the enum above). */
	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	/* Pages of the log tree marked dirty, tracked per log transid. */
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	int log_transid;
	/* Updated no matter whether the log commit succeeds or not. */
	int log_transid_committed;
	/* Only updated when the log commit succeeds. */
	int last_log_commit;
	pid_t log_start_pid;

	/* Transid of the transaction that last modified this root. */
	u64 last_trans;

	u32 type;

	/*
	 * Next objectid to hand out for new items in this root
	 * (NOTE(review): presumably new inode numbers — see objectid_mutex).
	 */
	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	spinlock_t log_extents_lock[2];
	struct list_head logged_list[2];

	spinlock_t inode_lock;
	/* Red-black tree that keeps track of in-memory inodes. */
	struct rb_root inode_tree;

	/*
	 * Radix tree that keeps track of delayed nodes of every inode,
	 * protected by @inode_lock.
	 */
	struct radix_tree_root delayed_nodes_tree;
	/*
	 * Right now this just gets used so that a root has its own devid
	 * for stat. It may be used for more later.
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * All of the inodes that have delalloc bytes. It is possible for
	 * this list to be empty even when there are still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * This is used by the balancing code to wait for all the pending
	 * ordered extents.
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * All of the data=ordered extents pending writeback. These can span
	 * multiple transactions and basically include every dirty data page
	 * that isn't from nodatacow.
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation with the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};
118c701e 1076
bf385648
FM
/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
	/* Disk location (bytenr) of the replacement extent. */
	u64 disk_offset;
	/* On-disk length of the replacement extent. */
	u64 disk_len;
	/* Offset into the extent where the file data starts. */
	u64 data_offset;
	/* Length of the file data within the extent. */
	u64 data_len;
	/* File offset at which the replacement extent is placed. */
	u64 file_offset;
	/* Pointer to a file extent item of type regular or prealloc. */
	char *extent_buf;
	/*
	 * Set to true when attempting to replace a file range with a new extent
	 * described by this structure, set to false when attempting to clone an
	 * existing extent into a file range.
	 */
	bool is_new_extent;
	/* Indicate if we should update the inode's mtime and ctime. */
	bool update_times;
	/* Meaningful only if is_new_extent is true. */
	int qgroup_reserved;
	/*
	 * Meaningful only if is_new_extent is true.
	 * Used to track how many extent items we have already inserted in a
	 * subvolume tree that refer to the extent described by this structure,
	 * so that we know when to create a new delayed ref or update an existing
	 * one.
	 */
	int insertions;
};
1108
5893dfb9
FM
/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
	/* Input parameters */

	/*
	 * If NULL, btrfs_drop_extents() will allocate and free its own path.
	 * If 'replace_extent' is true, this must not be NULL. Also the path
	 * is always released except if 'replace_extent' is true and
	 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
	 * the path is kept locked.
	 */
	struct btrfs_path *path;
	/* Start offset of the range to drop extents from */
	u64 start;
	/* End (exclusive, last byte + 1) of the range to drop extents from */
	u64 end;
	/* If true drop all the extent maps in the range */
	bool drop_cache;
	/*
	 * If true it means we want to insert a new extent after dropping all
	 * the extents in the range. If this is true, the 'extent_item_size'
	 * parameter must be set as well and the 'extent_inserted' field will
	 * be set to true by btrfs_drop_extents() if it could insert the new
	 * extent.
	 * Note: when this is set to true the path must not be NULL.
	 */
	bool replace_extent;
	/*
	 * Used if 'replace_extent' is true. Size of the file extent item to
	 * insert after dropping all existing extents in the range.
	 */
	u32 extent_item_size;

	/* Output parameters */

	/*
	 * Set to the minimum between the input parameter 'end' and the end
	 * (exclusive, last byte + 1) of the last dropped extent. This is always
	 * set even if btrfs_drop_extents() returns an error.
	 */
	u64 drop_end;
	/*
	 * The number of allocated bytes found in the range. This can be smaller
	 * than the range's length when there are holes in the range.
	 */
	u64 bytes_found;
	/*
	 * Only set if 'replace_extent' is true. Set to true if we were able
	 * to insert a replacement extent after dropping all extents in the
	 * range, otherwise set to false by btrfs_drop_extents().
	 * Also, if btrfs_drop_extents() has set this to true it means it
	 * returned with the path locked, otherwise if it has set this to
	 * false it has returned with the path released.
	 */
	bool extent_inserted;
};
1165
/* Per-open-file private data (hung off struct file). */
struct btrfs_file_private {
	/* Scratch buffer for directory entries; presumably used by readdir — verify in inode.c. */
	void *filldir_buf;
};
1169
62e2749e 1170
da17066c 1171static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
1db1ff92 1172{
118c701e
NB
1173
1174 return info->nodesize - sizeof(struct btrfs_header);
1db1ff92
JM
1175}
1176
da17066c 1177static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
1db1ff92 1178{
da17066c 1179 return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
1db1ff92
JM
1180}
1181
da17066c 1182static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
1db1ff92 1183{
da17066c 1184 return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
1db1ff92
JM
1185}
1186
1187#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
1188 (offsetof(struct btrfs_file_extent_item, disk_bytenr))
da17066c 1189static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
1db1ff92 1190{
da17066c 1191 return BTRFS_MAX_ITEM_SIZE(info) -
1db1ff92
JM
1192 BTRFS_FILE_EXTENT_INLINE_DATA_START;
1193}
1194
da17066c 1195static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
1db1ff92 1196{
da17066c 1197 return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
1db1ff92
JM
1198}
1199
2e78c927 1200#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
265fdfa6 1201 ((bytes) >> (fs_info)->sectorsize_bits)
2e78c927 1202
65019df8
JT
/* Thin wrapper over the kernel's crc32c() used for btrfs checksumming. */
static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
{
	return crc32c(crc, address, length);
}
1207
/* Finalize a crc32c: invert it and store little-endian into @result. */
static inline void btrfs_crc32c_final(u32 crc, u8 *result)
{
	put_unaligned_le32(~crc, result);
}
1212
9678c543
NB
/*
 * Hash a directory entry name with crc32c, seeded with (u32)~1.
 * The resulting values are stored on disk (dir index keys), so the seed and
 * algorithm must never change.
 */
static inline u64 btrfs_name_hash(const char *name, int len)
{
	return crc32c((u32)~1, name, len);
}
1217
/*
 * Figure the key offset of an extended inode ref: crc32c of the name seeded
 * with the parent directory's objectid. Note that crc32c() takes a u32 seed,
 * so only the low 32 bits of @parent_objectid participate.
 */
static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
				    int len)
{
	return (u64) crc32c(parent_objectid, name, len);
}
1226
3b16a4e3
JB
/*
 * GFP mask for allocations done on the write path: constrain the mapping's
 * mask with ~__GFP_FS so reclaim cannot recurse back into the filesystem.
 */
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
	return mapping_gfp_constraint(mapping, ~__GFP_FS);
}
1231
b18c6685 1232/* extent-tree.c */
28f75a0e 1233
/*
 * Classification of an inline extent backref, returned by (and passed as the
 * expected type to) btrfs_get_extent_inline_ref_type().
 */
enum btrfs_inline_ref_type {
	BTRFS_REF_TYPE_INVALID,
	BTRFS_REF_TYPE_BLOCK,
	BTRFS_REF_TYPE_DATA,
	BTRFS_REF_TYPE_ANY,
};
1240
1241int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
1242 struct btrfs_extent_inline_ref *iref,
1243 enum btrfs_inline_ref_type is_data);
0785a9aa 1244u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
167ce953 1245
16cdcec7 1246
6f410d1b
JB
1247int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
1248 u64 start, u64 num_bytes);
32da5386 1249void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
56bec294 1250int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
c79a70b1 1251 unsigned long count);
31890da0
JB
1252void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1253 struct btrfs_delayed_ref_root *delayed_refs,
1254 struct btrfs_delayed_ref_head *head);
2ff7e61e 1255int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
a22285a6 1256int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
2ff7e61e 1257 struct btrfs_fs_info *fs_info, u64 bytenr,
3173a18f 1258 u64 offset, int metadata, u64 *refs, u64 *flags);
b25c36f8
NB
1259int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
1260 int reserved);
9fce5704 1261int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
e688b725 1262 u64 bytenr, u64 num_bytes);
bcdc428c 1263int btrfs_exclude_logged_extents(struct extent_buffer *eb);
e4c3b2dc 1264int btrfs_cross_ref_exist(struct btrfs_root *root,
1a89f173
FM
1265 u64 objectid, u64 offset, u64 bytenr, bool strict,
1266 struct btrfs_path *path);
4d75f8a9 1267struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
310712b2
OS
1268 struct btrfs_root *root,
1269 u64 parent, u64 root_objectid,
1270 const struct btrfs_disk_key *key,
1271 int level, u64 hint,
9631e4cc
JB
1272 u64 empty_size,
1273 enum btrfs_lock_nesting nest);
f0486c68 1274void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
7a163608 1275 u64 root_id,
f0486c68 1276 struct extent_buffer *buf,
5581a51a 1277 u64 parent, int last_ref);
5d4f98a2 1278int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
84f7d8e6 1279 struct btrfs_root *root, u64 owner,
5846a3c2
QW
1280 u64 offset, u64 ram_bytes,
1281 struct btrfs_key *ins);
5d4f98a2 1282int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
1283 u64 root_objectid, u64 owner, u64 offset,
1284 struct btrfs_key *ins);
18513091 1285int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
00361589 1286 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
e570fd27 1287 struct btrfs_key *ins, int is_data, int delalloc);
e089f05c 1288int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
e339a6b0 1289 struct extent_buffer *buf, int full_backref);
5d4f98a2 1290int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
e339a6b0 1291 struct extent_buffer *buf, int full_backref);
5d4f98a2 1292int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2fe6a5a1 1293 struct extent_buffer *eb, u64 flags, int level);
ffd4bb2a 1294int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
5d4f98a2 1295
2ff7e61e
JM
1296int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
1297 u64 start, u64 len, int delalloc);
7bfc1007 1298int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
a0fbf736 1299 u64 len);
5ead2dd0 1300int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
b18c6685 1301int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
82fa113f 1302 struct btrfs_ref *generic_ref);
5d4f98a2 1303
4184ea7f 1304void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
08e007d2 1305
d5c12070
MX
1306int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
1307 struct btrfs_block_rsv *rsv,
c4c129db 1308 int nitems, bool use_global_rsv);
e85fde51 1309void btrfs_subvolume_release_metadata(struct btrfs_root *root,
7775c818 1310 struct btrfs_block_rsv *rsv);
8702ba93 1311void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
8b62f87b 1312
28c9b1e7 1313int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
d4135134 1314 u64 disk_num_bytes, bool noflush);
6d07bcec 1315u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
2ff7e61e 1316int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
acce952b 1317 u64 start, u64 end);
2ff7e61e 1318int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
1edb647b 1319 u64 num_bytes, u64 *actual_bytes);
2ff7e61e 1320int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
acce952b 1321
c59021f8 1322int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
bed92eae
AJ
1323int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
1324 struct btrfs_fs_info *fs_info);
ea14b57f
DS
1325int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
1326void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
0bc19f90 1327void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
a5ed9182 1328
dee26a9f 1329/* ctree.c */
226463d7
JB
1330int __init btrfs_ctree_init(void);
1331void __cold btrfs_ctree_exit(void);
310712b2 1332int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
e3b83361 1333 int *slot);
e1f60a65 1334int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
0b86a832
CM
1335int btrfs_previous_item(struct btrfs_root *root,
1336 struct btrfs_path *path, u64 min_objectid,
1337 int type);
ade2e0b3
WS
1338int btrfs_previous_extent_item(struct btrfs_root *root,
1339 struct btrfs_path *path, u64 min_objectid);
b7a0365e
DD
1340void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
1341 struct btrfs_path *path,
310712b2 1342 const struct btrfs_key *new_key);
925baedd 1343struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
e7a84565 1344int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
3f157a2f 1345 struct btrfs_key *key, int lowest_level,
de78b51a 1346 u64 min_trans);
3f157a2f 1347int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
de78b51a 1348 struct btrfs_path *path,
3f157a2f 1349 u64 min_trans);
4b231ae4
DS
1350struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1351 int slot);
1352
5f39d397
CM
1353int btrfs_cow_block(struct btrfs_trans_handle *trans,
1354 struct btrfs_root *root, struct extent_buffer *buf,
1355 struct extent_buffer *parent, int parent_slot,
9631e4cc
JB
1356 struct extent_buffer **cow_ret,
1357 enum btrfs_lock_nesting nest);
be20aa9d
CM
1358int btrfs_copy_root(struct btrfs_trans_handle *trans,
1359 struct btrfs_root *root,
1360 struct extent_buffer *buf,
1361 struct extent_buffer **cow_ret, u64 new_root_objectid);
5d4f98a2
YZ
1362int btrfs_block_can_be_shared(struct btrfs_root *root,
1363 struct extent_buffer *buf);
c71dd880 1364void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
78ac4f9e 1365void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
459931ec
CM
1366int btrfs_split_item(struct btrfs_trans_handle *trans,
1367 struct btrfs_root *root,
1368 struct btrfs_path *path,
310712b2 1369 const struct btrfs_key *new_key,
459931ec 1370 unsigned long split_offset);
ad48fd75
YZ
1371int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
1372 struct btrfs_root *root,
1373 struct btrfs_path *path,
310712b2 1374 const struct btrfs_key *new_key);
e33d5c3d
KN
1375int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1376 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
310712b2
OS
1377int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1378 const struct btrfs_key *key, struct btrfs_path *p,
1379 int ins_len, int cow);
1380int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
5d9e75c4 1381 struct btrfs_path *p, u64 time_seq);
2f38b3e1 1382int btrfs_search_slot_for_read(struct btrfs_root *root,
310712b2
OS
1383 const struct btrfs_key *key,
1384 struct btrfs_path *p, int find_higher,
1385 int return_any);
6702ed49 1386int btrfs_realloc_node(struct btrfs_trans_handle *trans,
5f39d397 1387 struct btrfs_root *root, struct extent_buffer *parent,
de78b51a 1388 int start_slot, u64 *last_ret,
a6b6e75e 1389 struct btrfs_key *progress);
b3b4aa74 1390void btrfs_release_path(struct btrfs_path *p);
2c90e5d6
CM
1391struct btrfs_path *btrfs_alloc_path(void);
1392void btrfs_free_path(struct btrfs_path *p);
b4ce94de 1393
85e21bac
CM
1394int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1395 struct btrfs_path *path, int slot, int nr);
85e21bac
CM
1396static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
1397 struct btrfs_root *root,
1398 struct btrfs_path *path)
1399{
1400 return btrfs_del_items(trans, root, path, path->slots[0], 1);
1401}
1402
b7ef5f3a
FM
/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
	/*
	 * Pointer to an array containing the keys of the items to insert (in
	 * sorted order).
	 */
	const struct btrfs_key *keys;
	/* Pointer to an array containing the data size for each item to insert. */
	const u32 *data_sizes;
	/*
	 * The sum of data sizes for all items. The caller can compute this while
	 * setting up the data_sizes array, so it ends up being more efficient
	 * than having btrfs_insert_empty_items() or setup_item_for_insert()
	 * doing it, as it would avoid an extra loop over a potentially large
	 * array, and in the case of setup_item_for_insert(), we would be doing
	 * it while holding a write lock on a leaf and often on upper level nodes
	 * too, unnecessarily increasing the size of a critical section.
	 */
	u32 total_data_size;
	/* Size of the keys and data_sizes arrays (number of items in the batch). */
	int nr;
};
1428
f0641656
FM
1429void btrfs_setup_item_for_insert(struct btrfs_root *root,
1430 struct btrfs_path *path,
1431 const struct btrfs_key *key,
1432 u32 data_size);
310712b2
OS
1433int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1434 const struct btrfs_key *key, void *data, u32 data_size);
9c58309d
CM
1435int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
1436 struct btrfs_root *root,
1437 struct btrfs_path *path,
b7ef5f3a 1438 const struct btrfs_item_batch *batch);
9c58309d
CM
1439
1440static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
1441 struct btrfs_root *root,
1442 struct btrfs_path *path,
310712b2 1443 const struct btrfs_key *key,
9c58309d
CM
1444 u32 data_size)
1445{
b7ef5f3a
FM
1446 struct btrfs_item_batch batch;
1447
1448 batch.keys = key;
1449 batch.data_sizes = &data_size;
1450 batch.total_data_size = data_size;
1451 batch.nr = 1;
1452
1453 return btrfs_insert_empty_items(trans, root, path, &batch);
9c58309d
CM
1454}
1455
16e7549f 1456int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
3d7806ec
JS
1457int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
1458 u64 time_seq);
0ff40a91
MPS
1459
1460int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
1461 struct btrfs_path *path);
1462
62142be3
GN
1463int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
1464 struct btrfs_path *path);
1465
1466/*
1467 * Search in @root for a given @key, and store the slot found in @found_key.
1468 *
1469 * @root: The root node of the tree.
1470 * @key: The key we are looking for.
1471 * @found_key: Will hold the found item.
1472 * @path: Holds the current slot/leaf.
1473 * @iter_ret: Contains the value returned from btrfs_search_slot or
1474 * btrfs_get_next_valid_item, whichever was executed last.
1475 *
1476 * The @iter_ret is an output variable that will contain the return value of
1477 * btrfs_search_slot, if it encountered an error, or the value returned from
1478 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
1479 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
1480 *
1481 * It's recommended to use a separate variable for iter_ret and then use it to
1482 * set the function return value so there's no confusion of the 0/1/errno
1483 * values stemming from btrfs_search_slot.
1484 */
1485#define btrfs_for_each_slot(root, key, found_key, path, iter_ret) \
1486 for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0); \
1487 (iter_ret) >= 0 && \
1488 (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
1489 (path)->slots[0]++ \
1490 )
1491
890d2b1a 1492int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);
809d6902
DS
1493
/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 *
 * Passes time_seq == 0, i.e. operates on the current tree rather than an
 * old tree-mod-log snapshot.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
1504
1c8f52a5
AB
/* Advance @p to the next item in the current tree (time_seq 0 = no snapshot). */
static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
e902baac 1509int btrfs_leaf_free_space(struct extent_buffer *leaf);
0078a9f9
NB
1510int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
1511 int for_reloc);
f82d02d9
YZ
1512int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
1513 struct btrfs_root *root,
1514 struct extent_buffer *node,
1515 struct extent_buffer *parent);
babbf170 1516
dee26a9f 1517/* root-item.c */
6025c19f 1518int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
e43eec81
STD
1519 u64 ref_id, u64 dirid, u64 sequence,
1520 const struct qstr *name);
3ee1c553 1521int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
e43eec81
STD
1522 u64 ref_id, u64 dirid, u64 *sequence,
1523 const struct qstr *name);
1cd5447e 1524int btrfs_del_root(struct btrfs_trans_handle *trans,
ab9ce7d4 1525 const struct btrfs_key *key);
310712b2
OS
1526int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1527 const struct btrfs_key *key,
1528 struct btrfs_root_item *item);
b45a9d8b
JM
1529int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
1530 struct btrfs_root *root,
1531 struct btrfs_key *key,
1532 struct btrfs_root_item *item);
310712b2 1533int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
cb517eab
MX
1534 struct btrfs_path *path, struct btrfs_root_item *root_item,
1535 struct btrfs_key *root_key);
6bccf3ab 1536int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
bf5f32ec
MF
1537void btrfs_set_root_node(struct btrfs_root_item *item,
1538 struct extent_buffer *node);
08fe4db1 1539void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
8ea05e3a
AB
1540void btrfs_update_root_times(struct btrfs_trans_handle *trans,
1541 struct btrfs_root *root);
08fe4db1 1542
07b30a49 1543/* uuid-tree.c */
cdb345a8 1544int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
07b30a49 1545 u64 subid);
d1957791 1546int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
07b30a49 1547 u64 subid);
560b7a4a 1548int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
07b30a49 1549
dee26a9f 1550/* dir-item.c */
9c52057c 1551int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
e43eec81
STD
1552 const struct qstr *name);
1553int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
1554 const struct qstr *name, struct btrfs_inode *dir,
aec7477b 1555 struct btrfs_key *location, u8 type, u64 index);
7e38180e
CM
1556struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
1557 struct btrfs_root *root,
1558 struct btrfs_path *path, u64 dir,
e43eec81 1559 const struct qstr *name, int mod);
7e38180e
CM
1560struct btrfs_dir_item *
1561btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
1562 struct btrfs_root *root,
1563 struct btrfs_path *path, u64 dir,
e43eec81 1564 u64 index, const struct qstr *name, int mod);
4df27c4d
YZ
1565struct btrfs_dir_item *
1566btrfs_search_dir_index_item(struct btrfs_root *root,
1567 struct btrfs_path *path, u64 dirid,
e43eec81 1568 const struct qstr *name);
7e38180e
CM
1569int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
1570 struct btrfs_root *root,
1571 struct btrfs_path *path,
1572 struct btrfs_dir_item *di);
5103e947 1573int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
f34f57a3
YZ
1574 struct btrfs_root *root,
1575 struct btrfs_path *path, u64 objectid,
1576 const char *name, u16 name_len,
1577 const void *data, u16 data_len);
5103e947
JB
1578struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1579 struct btrfs_root *root,
1580 struct btrfs_path *path, u64 dir,
1581 const char *name, u16 name_len,
1582 int mod);
2ff7e61e 1583struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
5f5bc6b1
FM
1584 struct btrfs_path *path,
1585 const char *name,
1586 int name_len);
7b128766
JB
1587
1588/* orphan.c */
1589int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
1590 struct btrfs_root *root, u64 offset);
1591int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
1592 struct btrfs_root *root, u64 offset);
4df27c4d 1593int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
7b128766 1594
dee26a9f 1595/* file-item.c */
459931ec 1596int btrfs_del_csums(struct btrfs_trans_handle *trans,
40e046ac 1597 struct btrfs_root *root, u64 bytenr, u64 len);
6275193e 1598blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
d1f68ba0
OS
1599int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
1600 struct btrfs_root *root, u64 objectid, u64 pos,
1601 u64 num_bytes);
dee26a9f
CM
1602int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
1603 struct btrfs_root *root,
1604 struct btrfs_path *path, u64 objectid,
db94535d 1605 u64 bytenr, int mod);
065631f6 1606int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
d20f7043 1607 struct btrfs_root *root,
e6dcd2dc 1608 struct btrfs_ordered_sum *sums);
bd242a08 1609blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
e331f6b1 1610 u64 offset, bool one_ordered);
a2de733c 1611int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
26ce9114
JB
1612 struct list_head *list, int search_commit,
1613 bool nowait);
9cdc5124 1614void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
7ffbb598
FM
1615 const struct btrfs_path *path,
1616 struct btrfs_file_extent_item *fi,
1617 const bool new_inline,
1618 struct extent_map *em);
41a2ee75
JB
1619int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
1620 u64 len);
1621int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
1622 u64 len);
76aea537 1623void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
a5eeb3d1 1624u64 btrfs_file_extent_end(const struct btrfs_path *path);
7ffbb598 1625
39279cc3 1626/* inode.c */
c93104e7
CH
1627void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
1628void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
1629 int mirror_num, enum btrfs_compression_type compress_type);
ae643a74
QW
1630int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
1631 u32 pgoff, u8 *csum, const u8 * const csum_expected);
81bd9328
CH
1632int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
1633 u32 bio_offset, struct page *page, u32 pgoff);
c3a3b19b
QW
1634unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
1635 u32 bio_offset, struct page *page,
1636 u64 start, u64 end);
7959bd44
CH
1637int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
1638 u32 bio_offset, struct page *page, u32 pgoff);
00361589 1639noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7ee9e440 1640 u64 *orig_start, u64 *orig_block_len,
26ce9114 1641 u64 *ram_bytes, bool nowait, bool strict);
4881ee5a 1642
2b877331
NB
1643void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1644 struct btrfs_inode *inode);
3de4586c 1645struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
70ddc553 1646int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
e02119d5 1647int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
4ec5934e 1648 struct btrfs_inode *dir, struct btrfs_inode *inode,
e43eec81 1649 const struct qstr *name);
e02119d5 1650int btrfs_add_link(struct btrfs_trans_handle *trans,
db0a669f 1651 struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
e43eec81 1652 const struct qstr *name, int add_backref, u64 index);
f60a2364 1653int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
217f42eb
NB
1654int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
1655 int front);
e02119d5 1656
f9baa501 1657int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
9db4dc24 1658int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
3d45f221 1659 bool in_reclaim_context);
c2566f22 1660int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
e3b8a485 1661 unsigned int extra_bits,
330a5827 1662 struct extent_state **cached_state);
3538d68d
OS
1663struct btrfs_new_inode_args {
1664 /* Input */
1665 struct inode *dir;
1666 struct dentry *dentry;
1667 struct inode *inode;
1668 bool orphan;
1669 bool subvol;
1670
1671 /*
1672 * Output from btrfs_new_inode_prepare(), input to
1673 * btrfs_create_new_inode().
1674 */
1675 struct posix_acl *default_acl;
1676 struct posix_acl *acl;
1677};
1678int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
1679 unsigned int *trans_num_items);
1680int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
caae78e0 1681 struct btrfs_new_inode_args *args);
3538d68d 1682void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
a1fd0c35
OS
1683struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
1684 struct inode *dir);
c629732d 1685 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
6d92b304 1686 u32 bits);
a36bb5f9 1687void btrfs_clear_delalloc_extent(struct inode *inode,
6d92b304 1688 struct extent_state *state, u32 bits);
5c848198
NB
1689void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1690 struct extent_state *other);
abbb55f4
NB
1691void btrfs_split_delalloc_extent(struct inode *inode,
1692 struct extent_state *orig, u64 split);
d2a91064 1693void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
a528a241 1694vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
bd555975 1695void btrfs_evict_inode(struct inode *inode);
39279cc3
CM
1696struct inode *btrfs_alloc_inode(struct super_block *sb);
1697void btrfs_destroy_inode(struct inode *inode);
26602cab 1698void btrfs_free_inode(struct inode *inode);
45321ac5 1699int btrfs_drop_inode(struct inode *inode);
f5c29bd9 1700int __init btrfs_init_cachep(void);
e67c718b 1701void __cold btrfs_destroy_cachep(void);
0202e83f 1702struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
4c66e0d4 1703 struct btrfs_root *root, struct btrfs_path *path);
0202e83f 1704struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
fc4f21b1 1705struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
de2c6615 1706 struct page *page, size_t pg_offset,
39b07b5d 1707 u64 start, u64 end);
a52d9a80 1708int btrfs_update_inode(struct btrfs_trans_handle *trans,
9a56fcd1 1709 struct btrfs_root *root, struct btrfs_inode *inode);
be6aef60 1710int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
729f7961 1711 struct btrfs_root *root, struct btrfs_inode *inode);
73f2e545
NB
1712int btrfs_orphan_add(struct btrfs_trans_handle *trans,
1713 struct btrfs_inode *inode);
66b4ffd1 1714int btrfs_orphan_cleanup(struct btrfs_root *root);
b06359a3 1715int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
24bbcf04 1716void btrfs_add_delayed_iput(struct inode *inode);
2ff7e61e 1717void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
034f784d 1718int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
efa56464
YZ
1719int btrfs_prealloc_file_range(struct inode *inode, int mode,
1720 u64 start, u64 num_bytes, u64 min_size,
1721 loff_t actual_len, u64 *alloc_hint);
0af3d00b
JB
1722int btrfs_prealloc_file_range_trans(struct inode *inode,
1723 struct btrfs_trans_handle *trans, int mode,
1724 u64 start, u64 num_bytes, u64 min_size,
1725 loff_t actual_len, u64 *alloc_hint);
98456b9c 1726int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
5eaad97a
NB
1727 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1728 struct writeback_control *wbc);
a129ffb8 1729int btrfs_writepage_cow_fixup(struct page *page);
38a39ac7
QW
1730void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
1731 struct page *page, u64 start,
25c1252a 1732 u64 end, bool uptodate);
3ea4dc5b
OS
1733int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
1734 int compress_type);
1735int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
1736 u64 file_offset, u64 disk_bytenr,
1737 u64 disk_io_size,
1738 struct page **pages);
1881fba8
OS
1739ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
1740 struct btrfs_ioctl_encoded_io_args *encoded);
7c0c7269
OS
1741ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1742 const struct btrfs_ioctl_encoded_io_args *encoded);
1881fba8 1743
8184620a
FM
1744ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
1745 size_t done_before);
1746struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
1747 size_t done_before);
36e8c622 1748
82d339d9 1749extern const struct dentry_operations btrfs_dentry_operations;
f46b5a66 1750
a14b78ad 1751/* Inode locking type flags, by default the exclusive lock is taken */
c7321b76
DS
1752enum btrfs_ilock_type {
1753 ENUM_BIT(BTRFS_ILOCK_SHARED),
1754 ENUM_BIT(BTRFS_ILOCK_TRY),
1755 ENUM_BIT(BTRFS_ILOCK_MMAP),
1756};
a14b78ad
GR
1757
1758int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags);
1759void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
2766ff61
FM
1760void btrfs_update_inode_bytes(struct btrfs_inode *inode,
1761 const u64 add_bytes,
1762 const u64 del_bytes);
63c34cb4 1763void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
f46b5a66
CH
1764
1765/* ioctl.c */
1766long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
4c63c245 1767long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
97fc2977
MS
1768int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
1769int btrfs_fileattr_set(struct user_namespace *mnt_userns,
1770 struct dentry *dentry, struct fileattr *fa);
d5131b65 1771int btrfs_ioctl_get_supported_features(void __user *arg);
7b6a221e 1772void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
e1f60a65 1773int __pure btrfs_is_empty_uuid(u8 *uuid);
1ccc2e8a 1774int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
4cb5300b 1775 struct btrfs_ioctl_defrag_range_args *range,
1ccc2e8a 1776 u64 newer_than, unsigned long max_to_defrag);
008ef096
DS
1777void btrfs_get_block_group_info(struct list_head *groups_list,
1778 struct btrfs_ioctl_space_info *space);
1779void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
35a3621b
SB
1780 struct btrfs_ioctl_balance_args *bargs);
1781
39279cc3 1782/* file.c */
f5c29bd9 1783int __init btrfs_auto_defrag_init(void);
e67c718b 1784void __cold btrfs_auto_defrag_exit(void);
4cb5300b 1785int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
558732df 1786 struct btrfs_inode *inode, u32 extent_thresh);
4cb5300b 1787int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
26176e7c 1788void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
02c24a82 1789int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
828c0950 1790extern const struct file_operations btrfs_file_operations;
5dc562c5 1791int btrfs_drop_extents(struct btrfs_trans_handle *trans,
5893dfb9
FM
1792 struct btrfs_root *root, struct btrfs_inode *inode,
1793 struct btrfs_drop_extents_args *args);
bfc78479
NB
1794int btrfs_replace_file_extents(struct btrfs_inode *inode,
1795 struct btrfs_path *path, const u64 start,
1796 const u64 end,
bf385648 1797 struct btrfs_replace_extent_info *extent_info,
690a5dbf 1798 struct btrfs_trans_handle **trans_out);
d899e052 1799int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
7a6d7067 1800 struct btrfs_inode *inode, u64 start, u64 end);
7c0c7269
OS
1801ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1802 const struct btrfs_ioctl_encoded_io_args *encoded);
6bf13c0c 1803int btrfs_release_file(struct inode *inode, struct file *file);
088545f6 1804int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
2ff7e61e 1805 size_t num_pages, loff_t pos, size_t write_bytes,
aa8c1a41 1806 struct extent_state **cached, bool noreserve);
728404da 1807int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
38d37aa9 1808int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
80f9d241 1809 size_t *write_bytes, bool nowait);
38d37aa9 1810void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
ac3c0d36
FM
1811bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
1812 u64 *delalloc_start_ret, u64 *delalloc_end_ret);
6bf13c0c 1813
6702ed49
CM
1814/* tree-defrag.c */
1815int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
de78b51a 1816 struct btrfs_root *root);
58176a96 1817
edbd8d4e 1818/* super.c */
2ff7e61e 1819int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
96da0919 1820 unsigned long new_flags);
6bf13c0c 1821int btrfs_sync_fs(struct super_block *sb, int wait);
c0c907a4
MPS
1822char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1823 u64 subvol_objectid);
533574c6 1824
e9306ad4
QW
1825#if BITS_PER_LONG == 32
1826#define BTRFS_32BIT_MAX_FILE_SIZE (((u64)ULONG_MAX + 1) << PAGE_SHIFT)
1827/*
1828 * The warning threshold is 5/8th of the MAX_LFS_FILESIZE that limits the logical
1829 * addresses of extents.
1830 *
1831 * For 4K page size it's about 10T, for 64K it's 160T.
1832 */
1833#define BTRFS_32BIT_EARLY_WARN_THRESHOLD (BTRFS_32BIT_MAX_FILE_SIZE * 5 / 8)
1834void btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info);
1835void btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info);
1836#endif
1837
884b07d0
QW
1838/*
1839 * Get the correct offset inside the page of extent buffer.
1840 *
1841 * @eb: target extent buffer
1842 * @start: offset inside the extent buffer
1843 *
1844 * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
1845 */
1846static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
1847 unsigned long offset)
1848{
1849 /*
1850 * For sectorsize == PAGE_SIZE case, eb->start will always be aligned
1851 * to PAGE_SIZE, thus adding it won't cause any difference.
1852 *
1853 * For sectorsize < PAGE_SIZE, we must only read the data that belongs
1854 * to the eb, thus we have to take the eb->start into consideration.
1855 */
1856 return offset_in_page(offset + eb->start);
1857}
1858
1859static inline unsigned long get_eb_page_index(unsigned long offset)
1860{
1861 /*
1862 * For sectorsize == PAGE_SIZE case, plain >> PAGE_SHIFT is enough.
1863 *
1864 * For sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE,
1865 * and have ensured that all tree blocks are contained in one page,
1866 * thus we always get index == 0.
1867 */
1868 return offset >> PAGE_SHIFT;
1869}
1870
f8f591df
JT
/*
 * Use this for functions that are conditionally exported for the sanity
 * tests but are otherwise static.
 */
1875#ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1876#define EXPORT_FOR_TESTS static
1877#else
1878#define EXPORT_FOR_TESTS
1879#endif
1880
33268eaf 1881/* acl.c */
0eda294d 1882#ifdef CONFIG_BTRFS_FS_POSIX_ACL
0cad6246 1883struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu);
549c7297
CB
1884int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
1885 struct posix_acl *acl, int type);
3538d68d
OS
1886int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
1887 struct posix_acl *acl, int type);
9b89d95a 1888#else
ed8f3737 1889#define btrfs_get_acl NULL
996a710d 1890#define btrfs_set_acl NULL
3538d68d
OS
/* POSIX ACL support not compiled in: report the operation as unsupported. */
static inline int __btrfs_set_acl(struct btrfs_trans_handle *trans,
				  struct inode *inode, struct posix_acl *acl,
				  int type)
{
	return -EOPNOTSUPP;
}
9b89d95a 1897#endif
0f9dd46c 1898
5d4f98a2 1899/* relocation.c */
6bccf3ab 1900int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
5d4f98a2
YZ
1901int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1902 struct btrfs_root *root);
1903int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1904 struct btrfs_root *root);
7eefae6b 1905int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
7bfa9535 1906int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len);
83d4cfd4
JB
1907int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
1908 struct btrfs_root *root, struct extent_buffer *buf,
1909 struct extent_buffer *cow);
147d256e 1910void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
3fd0a558 1911 u64 *bytes_to_reserve);
49b25e05 1912int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
3fd0a558 1913 struct btrfs_pending_snapshot *pending);
726a3421 1914int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
2433bea5
QW
1915struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info,
1916 u64 bytenr);
55465730 1917int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
a2de733c
AJ
1918
1919/* scrub.c */
aa1b8cd4
SB
1920int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
1921 u64 end, struct btrfs_scrub_progress *progress,
63a212ab 1922 int readonly, int is_dev_replace);
2ff7e61e
JM
1923void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
1924void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
aa1b8cd4 1925int btrfs_scrub_cancel(struct btrfs_fs_info *info);
163e97ee 1926int btrfs_scrub_cancel_dev(struct btrfs_device *dev);
2ff7e61e 1927int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
a2de733c 1928 struct btrfs_scrub_progress *progress);
c404e0dc
MX
1929
1930/* dev-replace.c */
1931void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
4245215d
MX
1932void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
1933
/*
 * Convenience wrapper around btrfs_bio_counter_sub() that drops exactly one
 * reference from the dev-replace bio counter.
 */
static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}
a2de733c 1938
95a06077
JS
1939static inline int is_fstree(u64 rootid)
1940{
1941 if (rootid == BTRFS_FS_TREE_OBJECTID ||
e09fe2d2
QW
1942 ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
1943 !btrfs_qgroup_level(rootid)))
95a06077
JS
1944 return 1;
1945 return 0;
1946}
210549eb
DS
1947
1948static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
1949{
1950 return signal_pending(current);
1951}
1952
14605409
BB
1953/* verity.c */
1954#ifdef CONFIG_FS_VERITY
1955
1956extern const struct fsverity_operations btrfs_verityops;
1957int btrfs_drop_verity_items(struct btrfs_inode *inode);
38622010 1958int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size);
14605409 1959
14605409
BB
1960#else
1961
/* fs-verity not compiled in: there are never verity items to drop. */
static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)
{
	return 0;
}
1966
38622010
BB
/* fs-verity not compiled in: reading a verity descriptor is not permitted. */
static inline int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
					      size_t buf_size)
{
	return -EPERM;
}
1972
14605409
BB
1973#endif
1974
aaedb55b
JB
1975/* Sanity test specific functions */
1976#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1977void btrfs_test_destroy_inode(struct inode *inode);
b2fa1154 1978#endif
9888c340 1979
37f00a6d
JT
1980static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
1981{
1982 return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
1983}
1984
f57ad937
QW
1985/*
1986 * We use page status Private2 to indicate there is an ordered extent with
1987 * unfinished IO.
1988 *
1989 * Rename the Private2 accessors to Ordered, to improve readability.
1990 */
1991#define PageOrdered(page) PagePrivate2(page)
1992#define SetPageOrdered(page) SetPagePrivate2(page)
1993#define ClearPageOrdered(page) ClearPagePrivate2(page)
895586eb
MWO
1994#define folio_test_ordered(folio) folio_test_private_2(folio)
1995#define folio_set_ordered(folio) folio_set_private_2(folio)
1996#define folio_clear_ordered(folio) folio_clear_private_2(folio)
f57ad937 1997
eb60ceac 1998#endif