1 /* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2007 Oracle. All rights reserved. */
10 #include <linux/sched/signal.h>
11 #include <linux/highmem.h>
13 #include <linux/rwsem.h>
14 #include <linux/semaphore.h>
15 #include <linux/completion.h>
16 #include <linux/backing-dev.h>
17 #include <linux/wait.h>
18 #include <linux/slab.h>
19 #include <trace/events/btrfs.h>
20 #include <asm/unaligned.h>
21 #include <linux/pagemap.h>
22 #include <linux/btrfs.h>
23 #include <linux/btrfs_tree.h>
24 #include <linux/workqueue.h>
25 #include <linux/security.h>
26 #include <linux/sizes.h>
27 #include <linux/dynamic_debug.h>
28 #include <linux/refcount.h>
29 #include <linux/crc32c.h>
30 #include <linux/iomap.h>
31 #include "extent-io-tree.h"
32 #include "extent_io.h"
33 #include "extent_map.h"
34 #include "async-thread.h"
35 #include "block-rsv.h"
39 struct btrfs_trans_handle;
40 struct btrfs_transaction;
41 struct btrfs_pending_snapshot;
42 struct btrfs_delayed_ref_root;
43 struct btrfs_space_info;
44 struct btrfs_block_group;
45 struct btrfs_ordered_sum;
48 struct btrfs_ioctl_encoded_io_args;
50 struct btrfs_fs_devices;
51 struct btrfs_balance_control;
52 struct btrfs_delayed_root;
55 struct btrfs_map_token;
57 #define BTRFS_OLDEST_GENERATION 0ULL
59 #define BTRFS_EMPTY_DIR_SIZE 0
61 #define BTRFS_DIRTY_METADATA_THRESH SZ_32M
63 #define BTRFS_MAX_EXTENT_SIZE SZ_128M
65 static inline unsigned long btrfs_chunk_item_size(int num_stripes)
67 BUG_ON(num_stripes == 0);
68 return sizeof(struct btrfs_chunk) +
69 sizeof(struct btrfs_stripe) * (num_stripes - 1);
72 #define BTRFS_SUPER_INFO_OFFSET SZ_64K
73 #define BTRFS_SUPER_INFO_SIZE 4096
74 static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
/*
 * The reserved space at the beginning of each device.
 * It covers the primary super block and leaves space for potential use by other
 * tools like bootloaders or to lower potential damage of accidental overwrite.
 */
81 #define BTRFS_DEVICE_RANGE_RESERVED (SZ_1M)
83 /* Read ahead values for struct btrfs_path.reada */
89 * Similar to READA_FORWARD but unlike it:
91 * 1) It will trigger readahead even for leaves that are not close to
93 * 2) It also triggers readahead for nodes;
94 * 3) During a search, even when a node or leaf is already in memory, it
95 * will still trigger readahead for other nodes and leaves that follow
98 * This is meant to be used only when we know we are iterating over the
99 * entire tree or a very large part of it.
101 READA_FORWARD_ALWAYS,
105 * btrfs_paths remember the path taken from the root down to the leaf.
106 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
107 * to any other levels that are present.
109 * The slots array records the index of the item or block pointer
110 * used while walking the tree.
113 struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
114 int slots[BTRFS_MAX_LEVEL];
115 /* if there is real range locking, this locks field will change */
116 u8 locks[BTRFS_MAX_LEVEL];
118 /* keep some upper locks as we walk down */
122 * set by btrfs_split_item, tells search_slot to keep all locks
123 * and to force calls to keep space in the nodes
125 unsigned int search_for_split:1;
126 unsigned int keep_locks:1;
127 unsigned int skip_locking:1;
128 unsigned int search_commit_root:1;
129 unsigned int need_commit_sem:1;
130 unsigned int skip_release_on_error:1;
132 * Indicate that new item (btrfs_search_slot) is extending already
133 * existing item and ins_len contains only the data size and not item
134 * header (ie. sizeof(struct btrfs_item) is not included).
136 unsigned int search_for_extension:1;
137 /* Stop search if any locks need to be taken (for read) */
138 unsigned int nowait:1;
141 struct btrfs_dev_replace {
142 u64 replace_state; /* see #define above */
143 time64_t time_started; /* seconds since 1-Jan-1970 */
144 time64_t time_stopped; /* seconds since 1-Jan-1970 */
145 atomic64_t num_write_errors;
146 atomic64_t num_uncorrectable_read_errors;
149 u64 committed_cursor_left;
150 u64 cursor_left_last_write_of_item;
153 u64 cont_reading_from_srcdev_mode; /* see #define above */
156 int item_needs_writeback;
157 struct btrfs_device *srcdev;
158 struct btrfs_device *tgtdev;
160 struct mutex lock_finishing_cancel_unmount;
161 struct rw_semaphore rwsem;
163 struct btrfs_scrub_progress scrub_progress;
165 struct percpu_counter bio_counter;
166 wait_queue_head_t replace_wait;
170 * free clusters are used to claim free space in relatively large chunks,
171 * allowing us to do less seeky writes. They are used for all metadata
172 * allocations. In ssd_spread mode they are also used for data allocations.
174 struct btrfs_free_cluster {
176 spinlock_t refill_lock;
179 /* largest extent in this cluster */
182 /* first extent starting offset */
185 /* We did a full search and couldn't create a cluster */
188 struct btrfs_block_group *block_group;
190 * when a cluster is allocated from a block group, we put the
191 * cluster onto a list in the block group so that it can
192 * be freed before the block group is freed.
194 struct list_head block_group_list;
197 /* Discard control. */
199 * Async discard uses multiple lists to differentiate the discard filter
200 * parameters. Index 0 is for completely free block groups where we need to
201 * ensure the entire block group is trimmed without being lossy. Indices
202 * afterwards represent monotonically decreasing discard filter sizes to
203 * prioritize what should be discarded next.
205 #define BTRFS_NR_DISCARD_LISTS 3
206 #define BTRFS_DISCARD_INDEX_UNUSED 0
207 #define BTRFS_DISCARD_INDEX_START 1
209 struct btrfs_discard_ctl {
210 struct workqueue_struct *discard_workers;
211 struct delayed_work work;
213 struct btrfs_block_group *block_group;
214 struct list_head discard_list[BTRFS_NR_DISCARD_LISTS];
216 u64 prev_discard_time;
217 atomic_t discardable_extents;
218 atomic64_t discardable_bytes;
219 u64 max_discard_size;
223 u64 discard_extent_bytes;
224 u64 discard_bitmap_bytes;
225 atomic64_t discard_bytes_saved;
229 * Exclusive operations (device replace, resize, device add/remove, balance)
231 enum btrfs_exclusive_operation {
233 BTRFS_EXCLOP_BALANCE_PAUSED,
234 BTRFS_EXCLOP_BALANCE,
235 BTRFS_EXCLOP_DEV_ADD,
236 BTRFS_EXCLOP_DEV_REMOVE,
237 BTRFS_EXCLOP_DEV_REPLACE,
239 BTRFS_EXCLOP_SWAP_ACTIVATE,
242 /* Store data about transaction commits, exported via sysfs. */
243 struct btrfs_commit_stats {
244 /* Total number of commits */
246 /* The maximum commit duration so far in ns */
248 /* The last commit duration in ns */
250 /* The total commit duration in ns */
251 u64 total_commit_dur;
254 struct btrfs_fs_info {
255 u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
257 struct btrfs_root *tree_root;
258 struct btrfs_root *chunk_root;
259 struct btrfs_root *dev_root;
260 struct btrfs_root *fs_root;
261 struct btrfs_root *quota_root;
262 struct btrfs_root *uuid_root;
263 struct btrfs_root *data_reloc_root;
264 struct btrfs_root *block_group_root;
266 /* the log root tree is a directory of all the other log roots */
267 struct btrfs_root *log_root_tree;
269 /* The tree that holds the global roots (csum, extent, etc) */
270 rwlock_t global_root_lock;
271 struct rb_root global_root_tree;
273 spinlock_t fs_roots_radix_lock;
274 struct radix_tree_root fs_roots_radix;
276 /* block group cache stuff */
277 rwlock_t block_group_cache_lock;
278 struct rb_root_cached block_group_cache_tree;
280 /* keep track of unallocated space */
281 atomic64_t free_chunk_space;
283 /* Track ranges which are used by log trees blocks/logged data extents */
284 struct extent_io_tree excluded_extents;
286 /* logical->physical extent mapping */
287 struct extent_map_tree mapping_tree;
290 * block reservation for extent, checksum, root tree and
291 * delayed dir index item
293 struct btrfs_block_rsv global_block_rsv;
294 /* block reservation for metadata operations */
295 struct btrfs_block_rsv trans_block_rsv;
296 /* block reservation for chunk tree */
297 struct btrfs_block_rsv chunk_block_rsv;
298 /* block reservation for delayed operations */
299 struct btrfs_block_rsv delayed_block_rsv;
300 /* block reservation for delayed refs */
301 struct btrfs_block_rsv delayed_refs_rsv;
303 struct btrfs_block_rsv empty_block_rsv;
306 u64 last_trans_committed;
308 * Generation of the last transaction used for block group relocation
309 * since the filesystem was last mounted (or 0 if none happened yet).
310 * Must be written and read while holding btrfs_fs_info::commit_root_sem.
312 u64 last_reloc_trans;
313 u64 avg_delayed_ref_runtime;
316 * this is updated to the current trans every time a full commit
317 * is required instead of the faster short fsync log commits
319 u64 last_trans_log_full_commit;
320 unsigned long mount_opt;
322 unsigned long compress_type:4;
323 unsigned int compress_level;
326 * It is a suggestive number, the read side is safe even it gets a
327 * wrong number because we will write out the data into a regular
328 * extent. The write side(mount/remount) is under ->s_umount lock,
329 * so it is also safe.
333 struct btrfs_transaction *running_transaction;
334 wait_queue_head_t transaction_throttle;
335 wait_queue_head_t transaction_wait;
336 wait_queue_head_t transaction_blocked_wait;
337 wait_queue_head_t async_submit_wait;
340 * Used to protect the incompat_flags, compat_flags, compat_ro_flags
341 * when they are updated.
343 * Because we do not clear the flags for ever, so we needn't use
344 * the lock on the read side.
346 * We also needn't use the lock when we mount the fs, because
347 * there is no other task which will update the flag.
349 spinlock_t super_lock;
350 struct btrfs_super_block *super_copy;
351 struct btrfs_super_block *super_for_commit;
352 struct super_block *sb;
353 struct inode *btree_inode;
354 struct mutex tree_log_mutex;
355 struct mutex transaction_kthread_mutex;
356 struct mutex cleaner_mutex;
357 struct mutex chunk_mutex;
360 * this is taken to make sure we don't set block groups ro after
361 * the free space cache has been allocated on them
363 struct mutex ro_block_group_mutex;
365 /* this is used during read/modify/write to make sure
366 * no two ios are trying to mod the same stripe at the same
369 struct btrfs_stripe_hash_table *stripe_hash_table;
372 * this protects the ordered operations list only while we are
373 * processing all of the entries on it. This way we make
374 * sure the commit code doesn't find the list temporarily empty
375 * because another function happens to be doing non-waiting preflush
376 * before jumping into the main commit.
378 struct mutex ordered_operations_mutex;
380 struct rw_semaphore commit_root_sem;
382 struct rw_semaphore cleanup_work_sem;
384 struct rw_semaphore subvol_sem;
386 spinlock_t trans_lock;
388 * the reloc mutex goes with the trans lock, it is taken
389 * during commit to protect us from the relocation code
391 struct mutex reloc_mutex;
393 struct list_head trans_list;
394 struct list_head dead_roots;
395 struct list_head caching_block_groups;
397 spinlock_t delayed_iput_lock;
398 struct list_head delayed_iputs;
399 atomic_t nr_delayed_iputs;
400 wait_queue_head_t delayed_iputs_wait;
402 atomic64_t tree_mod_seq;
404 /* this protects tree_mod_log and tree_mod_seq_list */
405 rwlock_t tree_mod_log_lock;
406 struct rb_root tree_mod_log;
407 struct list_head tree_mod_seq_list;
409 atomic_t async_delalloc_pages;
412 * this is used to protect the following list -- ordered_roots.
414 spinlock_t ordered_root_lock;
417 * all fs/file tree roots in which there are data=ordered extents
418 * pending writeback are added into this list.
420 * these can span multiple transactions and basically include
421 * every dirty data page that isn't from nodatacow
423 struct list_head ordered_roots;
425 struct mutex delalloc_root_mutex;
426 spinlock_t delalloc_root_lock;
427 /* all fs/file tree roots that have delalloc inodes. */
428 struct list_head delalloc_roots;
431 * there is a pool of worker threads for checksumming during writes
432 * and a pool for checksumming after reads. This is because readers
433 * can run with FS locks held, and the writers may be waiting for
434 * those locks. We don't want ordering in the pending list to cause
435 * deadlocks, and so the two are serviced separately.
437 * A third pool does submit_bio to avoid deadlocking with the other
440 struct btrfs_workqueue *workers;
441 struct btrfs_workqueue *hipri_workers;
442 struct btrfs_workqueue *delalloc_workers;
443 struct btrfs_workqueue *flush_workers;
444 struct workqueue_struct *endio_workers;
445 struct workqueue_struct *endio_meta_workers;
446 struct workqueue_struct *endio_raid56_workers;
447 struct workqueue_struct *rmw_workers;
448 struct workqueue_struct *compressed_write_workers;
449 struct btrfs_workqueue *endio_write_workers;
450 struct btrfs_workqueue *endio_freespace_worker;
451 struct btrfs_workqueue *caching_workers;
454 * fixup workers take dirty pages that didn't properly go through
455 * the cow mechanism and make them safe to write. It happens
456 * for the sys_munmap function call path
458 struct btrfs_workqueue *fixup_workers;
459 struct btrfs_workqueue *delayed_workers;
461 struct task_struct *transaction_kthread;
462 struct task_struct *cleaner_kthread;
463 u32 thread_pool_size;
465 struct kobject *space_info_kobj;
466 struct kobject *qgroups_kobj;
467 struct kobject *discard_kobj;
469 /* used to keep from writing metadata until there is a nice batch */
470 struct percpu_counter dirty_metadata_bytes;
471 struct percpu_counter delalloc_bytes;
472 struct percpu_counter ordered_bytes;
473 s32 dirty_metadata_batch;
476 struct list_head dirty_cowonly_roots;
478 struct btrfs_fs_devices *fs_devices;
481 * The space_info list is effectively read only after initial
482 * setup. It is populated at mount time and cleaned up after
483 * all block groups are removed. RCU is used to protect it.
485 struct list_head space_info;
487 struct btrfs_space_info *data_sinfo;
489 struct reloc_control *reloc_ctl;
491 /* data_alloc_cluster is only used in ssd_spread mode */
492 struct btrfs_free_cluster data_alloc_cluster;
494 /* all metadata allocations go through this cluster */
495 struct btrfs_free_cluster meta_alloc_cluster;
497 /* auto defrag inodes go here */
498 spinlock_t defrag_inodes_lock;
499 struct rb_root defrag_inodes;
500 atomic_t defrag_running;
502 /* Used to protect avail_{data, metadata, system}_alloc_bits */
503 seqlock_t profiles_lock;
505 * these three are in extended format (availability of single
506 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
507 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
509 u64 avail_data_alloc_bits;
510 u64 avail_metadata_alloc_bits;
511 u64 avail_system_alloc_bits;
513 /* restriper state */
514 spinlock_t balance_lock;
515 struct mutex balance_mutex;
516 atomic_t balance_pause_req;
517 atomic_t balance_cancel_req;
518 struct btrfs_balance_control *balance_ctl;
519 wait_queue_head_t balance_wait_q;
521 /* Cancellation requests for chunk relocation */
522 atomic_t reloc_cancel_req;
524 u32 data_chunk_allocations;
529 /* private scrub information */
530 struct mutex scrub_lock;
531 atomic_t scrubs_running;
532 atomic_t scrub_pause_req;
533 atomic_t scrubs_paused;
534 atomic_t scrub_cancel_req;
535 wait_queue_head_t scrub_pause_wait;
537 * The worker pointers are NULL iff the refcount is 0, ie. scrub is not
540 refcount_t scrub_workers_refcnt;
541 struct workqueue_struct *scrub_workers;
542 struct workqueue_struct *scrub_wr_completion_workers;
543 struct workqueue_struct *scrub_parity_workers;
544 struct btrfs_subpage_info *subpage_info;
546 struct btrfs_discard_ctl discard_ctl;
548 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
549 u32 check_integrity_print_mask;
551 /* is qgroup tracking in a consistent state? */
554 /* holds configuration and tracking. Protected by qgroup_lock */
555 struct rb_root qgroup_tree;
556 spinlock_t qgroup_lock;
559 * used to avoid frequently calling ulist_alloc()/ulist_free()
560 * when doing qgroup accounting, it must be protected by qgroup_lock.
562 struct ulist *qgroup_ulist;
565 * Protect user change for quota operations. If a transaction is needed,
566 * it must be started before locking this lock.
568 struct mutex qgroup_ioctl_lock;
570 /* list of dirty qgroups to be written at next commit */
571 struct list_head dirty_qgroups;
573 /* used by qgroup for an efficient tree traversal */
576 /* qgroup rescan items */
577 struct mutex qgroup_rescan_lock; /* protects the progress item */
578 struct btrfs_key qgroup_rescan_progress;
579 struct btrfs_workqueue *qgroup_rescan_workers;
580 struct completion qgroup_rescan_completion;
581 struct btrfs_work qgroup_rescan_work;
582 bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */
583 u8 qgroup_drop_subtree_thres;
585 /* filesystem state */
586 unsigned long fs_state;
588 struct btrfs_delayed_root *delayed_root;
590 /* Extent buffer radix tree */
591 spinlock_t buffer_lock;
592 /* Entries are eb->start / sectorsize */
593 struct radix_tree_root buffer_radix;
595 /* next backup root to be overwritten */
596 int backup_root_index;
598 /* device replace state */
599 struct btrfs_dev_replace dev_replace;
601 struct semaphore uuid_tree_rescan_sem;
603 /* Used to reclaim the metadata space in the background. */
604 struct work_struct async_reclaim_work;
605 struct work_struct async_data_reclaim_work;
606 struct work_struct preempt_reclaim_work;
608 /* Reclaim partially filled block groups in the background */
609 struct work_struct reclaim_bgs_work;
610 struct list_head reclaim_bgs;
611 int bg_reclaim_threshold;
613 spinlock_t unused_bgs_lock;
614 struct list_head unused_bgs;
615 struct mutex unused_bg_unpin_mutex;
616 /* Protect block groups that are going to be deleted */
617 struct mutex reclaim_bgs_lock;
619 /* Cached block sizes */
622 /* ilog2 of sectorsize, use to avoid 64bit division */
629 * Maximum size of an extent. BTRFS_MAX_EXTENT_SIZE on regular
630 * filesystem, on zoned it depends on the device constraints.
634 /* Block groups and devices containing active swapfiles. */
635 spinlock_t swapfile_pins_lock;
636 struct rb_root swapfile_pins;
638 struct crypto_shash *csum_shash;
640 /* Type of exclusive operation running, protected by super_lock */
641 enum btrfs_exclusive_operation exclusive_operation;
644 * Zone size > 0 when in ZONED mode, otherwise it's used for a check
645 * if the mode is enabled
649 /* Max size to emit ZONE_APPEND write command */
650 u64 max_zone_append_size;
651 struct mutex zoned_meta_io_lock;
652 spinlock_t treelog_bg_lock;
656 * Start of the dedicated data relocation block group, protected by
657 * relocation_bg_lock.
659 spinlock_t relocation_bg_lock;
661 struct mutex zoned_data_reloc_io_lock;
665 spinlock_t zone_active_bgs_lock;
666 struct list_head zone_active_bgs;
668 /* Updates are not protected by any lock */
669 struct btrfs_commit_stats commit_stats;
672 * Last generation where we dropped a non-relocation root.
673 * Use btrfs_set_last_root_drop_gen() and btrfs_get_last_root_drop_gen()
674 * to change it and to read it, respectively.
676 u64 last_root_drop_gen;
679 * Annotations for transaction events (structures are empty when
680 * compiled without lockdep).
682 struct lockdep_map btrfs_trans_num_writers_map;
683 struct lockdep_map btrfs_trans_num_extwriters_map;
684 struct lockdep_map btrfs_state_change_map[4];
685 struct lockdep_map btrfs_trans_pending_ordered_map;
686 struct lockdep_map btrfs_ordered_extent_map;
688 #ifdef CONFIG_BTRFS_FS_REF_VERIFY
689 spinlock_t ref_verify_lock;
690 struct rb_root block_tree;
693 #ifdef CONFIG_BTRFS_DEBUG
694 struct kobject *debug_kobj;
695 struct list_head allocated_roots;
697 spinlock_t eb_leak_lock;
698 struct list_head allocated_ebs;
702 static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
705 WRITE_ONCE(fs_info->last_root_drop_gen, gen);
708 static inline u64 btrfs_get_last_root_drop_gen(const struct btrfs_fs_info *fs_info)
710 return READ_ONCE(fs_info->last_root_drop_gen);
713 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
715 return sb->s_fs_info;
719 * Take the number of bytes to be checksummed and figure out how many leaves
720 * it would require to store the csums for that many bytes.
722 static inline u64 btrfs_csum_bytes_to_leaves(
723 const struct btrfs_fs_info *fs_info, u64 csum_bytes)
725 const u64 num_csums = csum_bytes >> fs_info->sectorsize_bits;
727 return DIV_ROUND_UP_ULL(num_csums, fs_info->csums_per_leaf);
731 * Use this if we would be adding new items, as we could split nodes as we cow
734 static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
737 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
741 * Doing a truncate or a modification won't result in new nodes or leaves, just
742 * what we need for COW.
744 static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
747 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
750 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
751 sizeof(struct btrfs_item))
753 static inline bool btrfs_is_zoned(const struct btrfs_fs_info *fs_info)
755 return fs_info->zone_size > 0;
759 * Count how many fs_info->max_extent_size cover the @size
761 static inline u32 count_max_extents(struct btrfs_fs_info *fs_info, u64 size)
763 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
765 return div_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, BTRFS_MAX_EXTENT_SIZE);
768 return div_u64(size + fs_info->max_extent_size - 1, fs_info->max_extent_size);
771 bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
772 enum btrfs_exclusive_operation type);
773 bool btrfs_exclop_start_try_lock(struct btrfs_fs_info *fs_info,
774 enum btrfs_exclusive_operation type);
775 void btrfs_exclop_start_unlock(struct btrfs_fs_info *fs_info);
776 void btrfs_exclop_finish(struct btrfs_fs_info *fs_info);
777 void btrfs_exclop_balance(struct btrfs_fs_info *fs_info,
778 enum btrfs_exclusive_operation op);
781 * The state of btrfs root
785 * btrfs_record_root_in_trans is a multi-step process, and it can race
786 * with the balancing code. But the race is very small, and only the
787 * first time the root is added to each transaction. So IN_TRANS_SETUP
788 * is used to tell us when more checks are required
790 BTRFS_ROOT_IN_TRANS_SETUP,
793 * Set if tree blocks of this root can be shared by other roots.
794 * Only subvolume trees and their reloc trees have this bit set.
795 * Conflicts with TRACK_DIRTY bit.
797 * This affects two things:
799 * - How balance works
800 * For shareable roots, we need to use reloc tree and do path
801 * replacement for balance, and need various pre/post hooks for
802 * snapshot creation to handle them.
804 * While for non-shareable trees, we just simply do a tree search
807 * - How dirty roots are tracked
808 * For shareable roots, btrfs_record_root_in_trans() is needed to
809 * track them, while non-subvolume roots have TRACK_DIRTY bit, they
810 * don't need to set this manually.
812 BTRFS_ROOT_SHAREABLE,
813 BTRFS_ROOT_TRACK_DIRTY,
815 BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
816 BTRFS_ROOT_DEFRAG_RUNNING,
817 BTRFS_ROOT_FORCE_COW,
818 BTRFS_ROOT_MULTI_LOG_TASKS,
823 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
825 * Set for the subvolume tree owning the reloc tree.
827 BTRFS_ROOT_DEAD_RELOC_TREE,
828 /* Mark dead root stored on device whose cleanup needs to be resumed */
829 BTRFS_ROOT_DEAD_TREE,
830 /* The root has a log tree. Used for subvolume roots and the tree root. */
831 BTRFS_ROOT_HAS_LOG_TREE,
832 /* Qgroup flushing is in progress */
833 BTRFS_ROOT_QGROUP_FLUSHING,
834 /* We started the orphan cleanup for this root. */
835 BTRFS_ROOT_ORPHAN_CLEANUP,
836 /* This root has a drop operation that was started previously. */
837 BTRFS_ROOT_UNFINISHED_DROP,
838 /* This reloc root needs to have its buffers lockdep class reset. */
839 BTRFS_ROOT_RESET_LOCKDEP_CLASS,
842 enum btrfs_lockdep_trans_states {
843 BTRFS_LOCKDEP_TRANS_COMMIT_START,
844 BTRFS_LOCKDEP_TRANS_UNBLOCKED,
845 BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
846 BTRFS_LOCKDEP_TRANS_COMPLETED,
850 * Lockdep annotation for wait events.
852 * @owner: The struct where the lockdep map is defined
853 * @lock: The lockdep map corresponding to a wait event
855 * This macro is used to annotate a wait event. In this case a thread acquires
856 * the lockdep map as writer (exclusive lock) because it has to block until all
857 * the threads that hold the lock as readers signal the condition for the wait
858 * event and release their locks.
860 #define btrfs_might_wait_for_event(owner, lock) \
862 rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_); \
863 rwsem_release(&owner->lock##_map, _THIS_IP_); \
867 * Protection for the resource/condition of a wait event.
869 * @owner: The struct where the lockdep map is defined
870 * @lock: The lockdep map corresponding to a wait event
872 * Many threads can modify the condition for the wait event at the same time
873 * and signal the threads that block on the wait event. The threads that modify
874 * the condition and do the signaling acquire the lock as readers (shared
877 #define btrfs_lockdep_acquire(owner, lock) \
878 rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)
881 * Used after signaling the condition for a wait event to release the lockdep
882 * map held by a reader thread.
884 #define btrfs_lockdep_release(owner, lock) \
885 rwsem_release(&owner->lock##_map, _THIS_IP_)
888 * Macros for the transaction states wait events, similar to the generic wait
891 #define btrfs_might_wait_for_state(owner, i) \
893 rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
894 rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_); \
897 #define btrfs_trans_state_lockdep_acquire(owner, i) \
898 rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)
900 #define btrfs_trans_state_lockdep_release(owner, i) \
901 rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)
903 /* Initialization of the lockdep map */
904 #define btrfs_lockdep_init_map(owner, lock) \
906 static struct lock_class_key lock##_key; \
907 lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
910 /* Initialization of the transaction states lockdep maps. */
911 #define btrfs_state_lockdep_init_map(owner, lock, state) \
913 static struct lock_class_key lock##_key; \
914 lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
919 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
920 * code. For detail check comment in fs/btrfs/qgroup.c.
922 struct btrfs_qgroup_swapped_blocks {
924 /* RM_EMPTY_ROOT() of above blocks[] */
926 struct rb_root blocks[BTRFS_MAX_LEVEL];
930 * in ram representation of the tree. extent_root is used for all allocations
931 * and for the extent tree extent_root root.
934 struct rb_node rb_node;
936 struct extent_buffer *node;
938 struct extent_buffer *commit_root;
939 struct btrfs_root *log_root;
940 struct btrfs_root *reloc_root;
943 struct btrfs_root_item root_item;
944 struct btrfs_key root_key;
945 struct btrfs_fs_info *fs_info;
946 struct extent_io_tree dirty_log_pages;
948 struct mutex objectid_mutex;
950 spinlock_t accounting_lock;
951 struct btrfs_block_rsv *block_rsv;
953 struct mutex log_mutex;
954 wait_queue_head_t log_writer_wait;
955 wait_queue_head_t log_commit_wait[2];
956 struct list_head log_ctxs[2];
957 /* Used only for log trees of subvolumes, not for the log root tree */
958 atomic_t log_writers;
959 atomic_t log_commit[2];
960 /* Used only for log trees of subvolumes, not for the log root tree */
963 /* No matter the commit succeeds or not*/
964 int log_transid_committed;
965 /* Just be updated when the commit succeeds. */
975 struct btrfs_key defrag_progress;
976 struct btrfs_key defrag_max;
978 /* The dirty list is only used by non-shareable roots */
979 struct list_head dirty_list;
981 struct list_head root_list;
983 spinlock_t log_extents_lock[2];
984 struct list_head logged_list[2];
986 spinlock_t inode_lock;
987 /* red-black tree that keeps track of in-memory inodes */
988 struct rb_root inode_tree;
991 * radix tree that keeps track of delayed nodes of every inode,
992 * protected by inode_lock
994 struct radix_tree_root delayed_nodes_tree;
996 * right now this just gets used so that a root has its own devid
997 * for stat. It may be used for more later
1001 spinlock_t root_item_lock;
1004 struct mutex delalloc_mutex;
1005 spinlock_t delalloc_lock;
1007 * all of the inodes that have delalloc bytes. It is possible for
1008 * this list to be empty even when there is still dirty data=ordered
1009 * extents waiting to finish IO.
1011 struct list_head delalloc_inodes;
1012 struct list_head delalloc_root;
1013 u64 nr_delalloc_inodes;
1015 struct mutex ordered_extent_mutex;
1017 * this is used by the balancing code to wait for all the pending
1020 spinlock_t ordered_extent_lock;
1023 * all of the data=ordered extents pending writeback
1024 * these can span multiple transactions and basically include
1025 * every dirty data page that isn't from nodatacow
1027 struct list_head ordered_extents;
1028 struct list_head ordered_root;
1029 u64 nr_ordered_extents;
1032 * Not empty if this subvolume root has gone through tree block swap
1035 * Will be used by reloc_control::dirty_subvol_roots.
1037 struct list_head reloc_dirty_list;
1040 * Number of currently running SEND ioctls to prevent
1041 * manipulation with the read-only status via SUBVOL_SETFLAGS
1043 int send_in_progress;
1045 * Number of currently running deduplication operations that have a
1046 * destination inode belonging to this root. Protected by the lock
1049 int dedupe_in_progress;
1050 /* For exclusion of snapshot creation and nocow writes */
1051 struct btrfs_drew_lock snapshot_lock;
1053 atomic_t snapshot_force_cow;
1055 /* For qgroup metadata reserved space */
1056 spinlock_t qgroup_meta_rsv_lock;
1057 u64 qgroup_meta_rsv_pertrans;
1058 u64 qgroup_meta_rsv_prealloc;
1059 wait_queue_head_t qgroup_flush_wait;
1061 /* Number of active swapfiles */
1062 atomic_t nr_swapfiles;
1064 /* Record pairs of swapped blocks for qgroup */
1065 struct btrfs_qgroup_swapped_blocks swapped_blocks;
1067 /* Used only by log trees, when logging csum items */
1068 struct extent_io_tree log_csum_range;
1070 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1074 #ifdef CONFIG_BTRFS_DEBUG
1075 struct list_head leak_list;
1080 * Structure that conveys information about an extent that is going to replace
1081 * all the extents in a file range.
1083 struct btrfs_replace_extent_info {
1089 /* Pointer to a file extent item of type regular or prealloc. */
1092 * Set to true when attempting to replace a file range with a new extent
1093 * described by this structure, set to false when attempting to clone an
1094 * existing extent into a file range.
1097 /* Indicate if we should update the inode's mtime and ctime. */
1099 /* Meaningful only if is_new_extent is true. */
1100 int qgroup_reserved;
1102 * Meaningful only if is_new_extent is true.
1103 * Used to track how many extent items we have already inserted in a
1104 * subvolume tree that refer to the extent described by this structure,
1105 * so that we know when to create a new delayed ref or update an existing
1111 /* Arguments for btrfs_drop_extents() */
1112 struct btrfs_drop_extents_args {
1113 /* Input parameters */
1116 * If NULL, btrfs_drop_extents() will allocate and free its own path.
1117 * If 'replace_extent' is true, this must not be NULL. Also the path
1118 * is always released except if 'replace_extent' is true and
1119 * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
1120 * the path is kept locked.
1122 struct btrfs_path *path;
1123 /* Start offset of the range to drop extents from */
1125 /* End (exclusive, last byte + 1) of the range to drop extents from */
1127 /* If true drop all the extent maps in the range */
1130 * If true it means we want to insert a new extent after dropping all
1131 * the extents in the range. If this is true, the 'extent_item_size'
1132 * parameter must be set as well and the 'extent_inserted' field will
1133 * be set to true by btrfs_drop_extents() if it could insert the new
1135 * Note: when this is set to true the path must not be NULL.
1137 bool replace_extent;
1139 * Used if 'replace_extent' is true. Size of the file extent item to
1140 * insert after dropping all existing extents in the range
1142 u32 extent_item_size;
1144 /* Output parameters */
1147 * Set to the minimum between the input parameter 'end' and the end
1148 * (exclusive, last byte + 1) of the last dropped extent. This is always
1149 * set even if btrfs_drop_extents() returns an error.
1153 * The number of allocated bytes found in the range. This can be smaller
1154 * than the range's length when there are holes in the range.
1158 * Only set if 'replace_extent' is true. Set to true if we were able
1159 * to insert a replacement extent after dropping all extents in the
1160 * range, otherwise set to false by btrfs_drop_extents().
1161 * Also, if btrfs_drop_extents() has set this to true it means it
1162 * returned with the path locked, otherwise if it has set this to
1163 * false it has returned with the path released.
1165 bool extent_inserted;
1168 struct btrfs_file_private {
1173 static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
1176 return info->nodesize - sizeof(struct btrfs_header);
1179 static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
1181 return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
1184 static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
1186 return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
1189 #define BTRFS_FILE_EXTENT_INLINE_DATA_START \
1190 (offsetof(struct btrfs_file_extent_item, disk_bytenr))
1191 static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
1193 return BTRFS_MAX_ITEM_SIZE(info) -
1194 BTRFS_FILE_EXTENT_INLINE_DATA_START;
1197 static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
1199 return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
1202 #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
1203 ((bytes) >> (fs_info)->sectorsize_bits)
1205 static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
1207 return crc32c(crc, address, length);
1210 static inline void btrfs_crc32c_final(u32 crc, u8 *result)
1212 put_unaligned_le32(~crc, result);
1215 static inline u64 btrfs_name_hash(const char *name, int len)
1217 return crc32c((u32)~1, name, len);
1221 * Figure the key offset of an extended inode ref
1223 static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
1226 return (u64) crc32c(parent_objectid, name, len);
1229 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
1231 return mapping_gfp_constraint(mapping, ~__GFP_FS);
1236 enum btrfs_inline_ref_type {
1237 BTRFS_REF_TYPE_INVALID,
1238 BTRFS_REF_TYPE_BLOCK,
1239 BTRFS_REF_TYPE_DATA,
1243 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
1244 struct btrfs_extent_inline_ref *iref,
1245 enum btrfs_inline_ref_type is_data);
1246 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
1249 int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
1250 u64 start, u64 num_bytes);
1251 void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
1252 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1253 unsigned long count);
1254 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
1255 struct btrfs_delayed_ref_root *delayed_refs,
1256 struct btrfs_delayed_ref_head *head);
1257 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
1258 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
1259 struct btrfs_fs_info *fs_info, u64 bytenr,
1260 u64 offset, int metadata, u64 *refs, u64 *flags);
1261 int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
1263 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
1264 u64 bytenr, u64 num_bytes);
1265 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
1266 int btrfs_cross_ref_exist(struct btrfs_root *root,
1267 u64 objectid, u64 offset, u64 bytenr, bool strict,
1268 struct btrfs_path *path);
1269 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
1270 struct btrfs_root *root,
1271 u64 parent, u64 root_objectid,
1272 const struct btrfs_disk_key *key,
1273 int level, u64 hint,
1275 enum btrfs_lock_nesting nest);
1276 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
1278 struct extent_buffer *buf,
1279 u64 parent, int last_ref);
1280 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
1281 struct btrfs_root *root, u64 owner,
1282 u64 offset, u64 ram_bytes,
1283 struct btrfs_key *ins);
1284 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
1285 u64 root_objectid, u64 owner, u64 offset,
1286 struct btrfs_key *ins);
1287 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
1288 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
1289 struct btrfs_key *ins, int is_data, int delalloc);
1290 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1291 struct extent_buffer *buf, int full_backref);
1292 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1293 struct extent_buffer *buf, int full_backref);
1294 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
1295 struct extent_buffer *eb, u64 flags, int level);
1296 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
1298 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
1299 u64 start, u64 len, int delalloc);
1300 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
1302 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
1303 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1304 struct btrfs_ref *generic_ref);
1306 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
1308 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
1309 struct btrfs_block_rsv *rsv,
1310 int nitems, bool use_global_rsv);
1311 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
1312 struct btrfs_block_rsv *rsv);
1313 void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
1315 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
1316 u64 disk_num_bytes, bool noflush);
1317 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
1318 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
1319 u64 start, u64 end);
1320 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
1321 u64 num_bytes, u64 *actual_bytes);
1322 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
1324 int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
1325 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
1326 struct btrfs_fs_info *fs_info);
1327 int btrfs_start_write_no_snapshotting(struct btrfs_root *root);
1328 void btrfs_end_write_no_snapshotting(struct btrfs_root *root);
1329 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
1332 int __init btrfs_ctree_init(void);
1333 void __cold btrfs_ctree_exit(void);
1334 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1336 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
1337 int btrfs_previous_item(struct btrfs_root *root,
1338 struct btrfs_path *path, u64 min_objectid,
1340 int btrfs_previous_extent_item(struct btrfs_root *root,
1341 struct btrfs_path *path, u64 min_objectid);
1342 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
1343 struct btrfs_path *path,
1344 const struct btrfs_key *new_key);
1345 struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
1346 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
1347 struct btrfs_key *key, int lowest_level,
1349 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
1350 struct btrfs_path *path,
1352 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1355 int btrfs_cow_block(struct btrfs_trans_handle *trans,
1356 struct btrfs_root *root, struct extent_buffer *buf,
1357 struct extent_buffer *parent, int parent_slot,
1358 struct extent_buffer **cow_ret,
1359 enum btrfs_lock_nesting nest);
1360 int btrfs_copy_root(struct btrfs_trans_handle *trans,
1361 struct btrfs_root *root,
1362 struct extent_buffer *buf,
1363 struct extent_buffer **cow_ret, u64 new_root_objectid);
1364 int btrfs_block_can_be_shared(struct btrfs_root *root,
1365 struct extent_buffer *buf);
1366 void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
1367 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
1368 int btrfs_split_item(struct btrfs_trans_handle *trans,
1369 struct btrfs_root *root,
1370 struct btrfs_path *path,
1371 const struct btrfs_key *new_key,
1372 unsigned long split_offset);
1373 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
1374 struct btrfs_root *root,
1375 struct btrfs_path *path,
1376 const struct btrfs_key *new_key);
1377 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
1378 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
1379 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1380 const struct btrfs_key *key, struct btrfs_path *p,
1381 int ins_len, int cow);
1382 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
1383 struct btrfs_path *p, u64 time_seq);
1384 int btrfs_search_slot_for_read(struct btrfs_root *root,
1385 const struct btrfs_key *key,
1386 struct btrfs_path *p, int find_higher,
1388 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1389 struct btrfs_root *root, struct extent_buffer *parent,
1390 int start_slot, u64 *last_ret,
1391 struct btrfs_key *progress);
1392 void btrfs_release_path(struct btrfs_path *p);
1393 struct btrfs_path *btrfs_alloc_path(void);
1394 void btrfs_free_path(struct btrfs_path *p);
1396 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1397 struct btrfs_path *path, int slot, int nr);
1398 static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
1399 struct btrfs_root *root,
1400 struct btrfs_path *path)
1402 return btrfs_del_items(trans, root, path, path->slots[0], 1);
1406 * Describes a batch of items to insert in a btree. This is used by
1407 * btrfs_insert_empty_items().
1409 struct btrfs_item_batch {
1411 * Pointer to an array containing the keys of the items to insert (in
1414 const struct btrfs_key *keys;
1415 /* Pointer to an array containing the data size for each item to insert. */
1416 const u32 *data_sizes;
1418 * The sum of data sizes for all items. The caller can compute this while
1419 * setting up the data_sizes array, so it ends up being more efficient
1420 * than having btrfs_insert_empty_items() or setup_item_for_insert()
1421 * doing it, as it would avoid an extra loop over a potentially large
1422 * array, and in the case of setup_item_for_insert(), we would be doing
1423 * it while holding a write lock on a leaf and often on upper level nodes
1424 * too, unnecessarily increasing the size of a critical section.
1426 u32 total_data_size;
1427 /* Size of the keys and data_sizes arrays (number of items in the batch). */
1431 void btrfs_setup_item_for_insert(struct btrfs_root *root,
1432 struct btrfs_path *path,
1433 const struct btrfs_key *key,
1435 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1436 const struct btrfs_key *key, void *data, u32 data_size);
1437 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
1438 struct btrfs_root *root,
1439 struct btrfs_path *path,
1440 const struct btrfs_item_batch *batch);
1442 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
1443 struct btrfs_root *root,
1444 struct btrfs_path *path,
1445 const struct btrfs_key *key,
1448 struct btrfs_item_batch batch;
1451 batch.data_sizes = &data_size;
1452 batch.total_data_size = data_size;
1455 return btrfs_insert_empty_items(trans, root, path, &batch);
1458 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
1459 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
1462 int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
1463 struct btrfs_path *path);
1465 int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
1466 struct btrfs_path *path);
1469 * Search in @root for a given @key, and store the slot found in @found_key.
1471 * @root: The root node of the tree.
1472 * @key: The key we are looking for.
1473 * @found_key: Will hold the found item.
1474 * @path: Holds the current slot/leaf.
1475 * @iter_ret: Contains the value returned from btrfs_search_slot or
1476 * btrfs_get_next_valid_item, whichever was executed last.
1478 * The @iter_ret is an output variable that will contain the return value of
1479 * btrfs_search_slot, if it encountered an error, or the value returned from
1480 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
1481 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
1483 * It's recommended to use a separate variable for iter_ret and then use it to
1484 * set the function return value so there's no confusion of the 0/1/errno
1485 * values stemming from btrfs_search_slot.
1487 #define btrfs_for_each_slot(root, key, found_key, path, iter_ret) \
1488 for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0); \
1489 (iter_ret) >= 0 && \
1490 (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
1491 (path)->slots[0]++ \
1494 int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);
1497 * Search the tree again to find a leaf with greater keys.
1499 * Returns 0 if it found something or 1 if there are no greater leaves.
1500 * Returns < 0 on error.
/*
 * Advance the path to the leaf with the next greater keys, at the current
 * time (time_seq == 0 means "no snapshot of the tree mod log").
 * Returns 0 on success, 1 if there are no greater leaves, < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
/* Advance the path to the next item, at the current time (time_seq == 0). */
static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
	return btrfs_next_old_item(root, p, 0);
}
1511 int btrfs_leaf_free_space(struct extent_buffer *leaf);
1512 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
1514 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
1515 struct btrfs_root *root,
1516 struct extent_buffer *node,
1517 struct extent_buffer *parent);
1520 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
1521 u64 ref_id, u64 dirid, u64 sequence, const char *name,
1523 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
1524 u64 ref_id, u64 dirid, u64 *sequence, const char *name,
1526 int btrfs_del_root(struct btrfs_trans_handle *trans,
1527 const struct btrfs_key *key);
1528 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1529 const struct btrfs_key *key,
1530 struct btrfs_root_item *item);
1531 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
1532 struct btrfs_root *root,
1533 struct btrfs_key *key,
1534 struct btrfs_root_item *item);
1535 int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
1536 struct btrfs_path *path, struct btrfs_root_item *root_item,
1537 struct btrfs_key *root_key);
1538 int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
1539 void btrfs_set_root_node(struct btrfs_root_item *item,
1540 struct extent_buffer *node);
1541 void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
1542 void btrfs_update_root_times(struct btrfs_trans_handle *trans,
1543 struct btrfs_root *root);
1546 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
1548 int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
1550 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
1553 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1554 const char *name, int name_len);
1555 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
1556 int name_len, struct btrfs_inode *dir,
1557 struct btrfs_key *location, u8 type, u64 index);
1558 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
1559 struct btrfs_root *root,
1560 struct btrfs_path *path, u64 dir,
1561 const char *name, int name_len,
1563 struct btrfs_dir_item *
1564 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
1565 struct btrfs_root *root,
1566 struct btrfs_path *path, u64 dir,
1567 u64 index, const char *name, int name_len,
1569 struct btrfs_dir_item *
1570 btrfs_search_dir_index_item(struct btrfs_root *root,
1571 struct btrfs_path *path, u64 dirid,
1572 const char *name, int name_len);
1573 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
1574 struct btrfs_root *root,
1575 struct btrfs_path *path,
1576 struct btrfs_dir_item *di);
1577 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
1578 struct btrfs_root *root,
1579 struct btrfs_path *path, u64 objectid,
1580 const char *name, u16 name_len,
1581 const void *data, u16 data_len);
1582 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1583 struct btrfs_root *root,
1584 struct btrfs_path *path, u64 dir,
1585 const char *name, u16 name_len,
1587 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
1588 struct btrfs_path *path,
1593 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
1594 struct btrfs_root *root, u64 offset);
1595 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
1596 struct btrfs_root *root, u64 offset);
1597 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
1600 int btrfs_del_csums(struct btrfs_trans_handle *trans,
1601 struct btrfs_root *root, u64 bytenr, u64 len);
1602 blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst);
1603 int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
1604 struct btrfs_root *root, u64 objectid, u64 pos,
1606 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
1607 struct btrfs_root *root,
1608 struct btrfs_path *path, u64 objectid,
1609 u64 bytenr, int mod);
1610 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
1611 struct btrfs_root *root,
1612 struct btrfs_ordered_sum *sums);
1613 blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
1614 u64 offset, bool one_ordered);
1615 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
1616 struct list_head *list, int search_commit,
1618 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
1619 const struct btrfs_path *path,
1620 struct btrfs_file_extent_item *fi,
1621 const bool new_inline,
1622 struct extent_map *em);
1623 int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
1625 int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
1627 void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size);
1628 u64 btrfs_file_extent_end(const struct btrfs_path *path);
1631 void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirror_num);
1632 void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
1633 int mirror_num, enum btrfs_compression_type compress_type);
1634 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
1635 u32 pgoff, u8 *csum, const u8 * const csum_expected);
1636 int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
1637 u32 bio_offset, struct page *page, u32 pgoff);
1638 unsigned int btrfs_verify_data_csum(struct btrfs_bio *bbio,
1639 u32 bio_offset, struct page *page,
1640 u64 start, u64 end);
1641 int btrfs_check_data_csum(struct inode *inode, struct btrfs_bio *bbio,
1642 u32 bio_offset, struct page *page, u32 pgoff);
1643 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
1644 u64 *orig_start, u64 *orig_block_len,
1645 u64 *ram_bytes, bool nowait, bool strict);
1647 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
1648 struct btrfs_inode *inode);
1649 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
1650 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
1651 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
1652 struct btrfs_inode *dir, struct btrfs_inode *inode,
1653 const char *name, int name_len);
1654 int btrfs_add_link(struct btrfs_trans_handle *trans,
1655 struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
1656 const char *name, int name_len, int add_backref, u64 index);
1657 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
1658 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
1661 int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
1662 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
1663 bool in_reclaim_context);
1664 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
1665 unsigned int extra_bits,
1666 struct extent_state **cached_state);
1667 struct btrfs_new_inode_args {
1670 struct dentry *dentry;
1671 struct inode *inode;
1676 * Output from btrfs_new_inode_prepare(), input to
1677 * btrfs_create_new_inode().
1679 struct posix_acl *default_acl;
1680 struct posix_acl *acl;
1682 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
1683 unsigned int *trans_num_items);
1684 int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
1685 struct btrfs_new_inode_args *args);
1686 void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
1687 struct inode *btrfs_new_subvol_inode(struct user_namespace *mnt_userns,
1689 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
1691 void btrfs_clear_delalloc_extent(struct inode *inode,
1692 struct extent_state *state, u32 bits);
1693 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
1694 struct extent_state *other);
1695 void btrfs_split_delalloc_extent(struct inode *inode,
1696 struct extent_state *orig, u64 split);
1697 void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
1698 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
1699 void btrfs_evict_inode(struct inode *inode);
1700 struct inode *btrfs_alloc_inode(struct super_block *sb);
1701 void btrfs_destroy_inode(struct inode *inode);
1702 void btrfs_free_inode(struct inode *inode);
1703 int btrfs_drop_inode(struct inode *inode);
1704 int __init btrfs_init_cachep(void);
1705 void __cold btrfs_destroy_cachep(void);
1706 struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
1707 struct btrfs_root *root, struct btrfs_path *path);
1708 struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
1709 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
1710 struct page *page, size_t pg_offset,
1711 u64 start, u64 end);
1712 int btrfs_update_inode(struct btrfs_trans_handle *trans,
1713 struct btrfs_root *root, struct btrfs_inode *inode);
1714 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
1715 struct btrfs_root *root, struct btrfs_inode *inode);
1716 int btrfs_orphan_add(struct btrfs_trans_handle *trans,
1717 struct btrfs_inode *inode);
1718 int btrfs_orphan_cleanup(struct btrfs_root *root);
1719 int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
1720 void btrfs_add_delayed_iput(struct inode *inode);
1721 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
1722 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
1723 int btrfs_prealloc_file_range(struct inode *inode, int mode,
1724 u64 start, u64 num_bytes, u64 min_size,
1725 loff_t actual_len, u64 *alloc_hint);
1726 int btrfs_prealloc_file_range_trans(struct inode *inode,
1727 struct btrfs_trans_handle *trans, int mode,
1728 u64 start, u64 num_bytes, u64 min_size,
1729 loff_t actual_len, u64 *alloc_hint);
1730 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
1731 u64 start, u64 end, int *page_started, unsigned long *nr_written,
1732 struct writeback_control *wbc);
1733 int btrfs_writepage_cow_fixup(struct page *page);
1734 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
1735 struct page *page, u64 start,
1736 u64 end, bool uptodate);
1737 int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
1739 int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
1740 u64 file_offset, u64 disk_bytenr,
1742 struct page **pages);
1743 ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
1744 struct btrfs_ioctl_encoded_io_args *encoded);
1745 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1746 const struct btrfs_ioctl_encoded_io_args *encoded);
1748 ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter,
1749 size_t done_before);
1750 struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter,
1751 size_t done_before);
1753 extern const struct dentry_operations btrfs_dentry_operations;
1755 /* Inode locking type flags, by default the exclusive lock is taken */
1756 enum btrfs_ilock_type {
1757 ENUM_BIT(BTRFS_ILOCK_SHARED),
1758 ENUM_BIT(BTRFS_ILOCK_TRY),
1759 ENUM_BIT(BTRFS_ILOCK_MMAP),
1762 int btrfs_inode_lock(struct inode *inode, unsigned int ilock_flags);
1763 void btrfs_inode_unlock(struct inode *inode, unsigned int ilock_flags);
1764 void btrfs_update_inode_bytes(struct btrfs_inode *inode,
1765 const u64 add_bytes,
1766 const u64 del_bytes);
1767 void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
1770 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
1771 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
1772 int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
1773 int btrfs_fileattr_set(struct user_namespace *mnt_userns,
1774 struct dentry *dentry, struct fileattr *fa);
1775 int btrfs_ioctl_get_supported_features(void __user *arg);
1776 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
1777 int __pure btrfs_is_empty_uuid(u8 *uuid);
1778 int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
1779 struct btrfs_ioctl_defrag_range_args *range,
1780 u64 newer_than, unsigned long max_to_defrag);
1781 void btrfs_get_block_group_info(struct list_head *groups_list,
1782 struct btrfs_ioctl_space_info *space);
1783 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
1784 struct btrfs_ioctl_balance_args *bargs);
1787 int __init btrfs_auto_defrag_init(void);
1788 void __cold btrfs_auto_defrag_exit(void);
1789 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
1790 struct btrfs_inode *inode, u32 extent_thresh);
1791 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
1792 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
1793 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
1794 extern const struct file_operations btrfs_file_operations;
1795 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
1796 struct btrfs_root *root, struct btrfs_inode *inode,
1797 struct btrfs_drop_extents_args *args);
1798 int btrfs_replace_file_extents(struct btrfs_inode *inode,
1799 struct btrfs_path *path, const u64 start,
1801 struct btrfs_replace_extent_info *extent_info,
1802 struct btrfs_trans_handle **trans_out);
1803 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
1804 struct btrfs_inode *inode, u64 start, u64 end);
1805 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1806 const struct btrfs_ioctl_encoded_io_args *encoded);
1807 int btrfs_release_file(struct inode *inode, struct file *file);
1808 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
1809 size_t num_pages, loff_t pos, size_t write_bytes,
1810 struct extent_state **cached, bool noreserve);
1811 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
1812 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1813 size_t *write_bytes, bool nowait);
1814 void btrfs_check_nocow_unlock(struct btrfs_inode *inode);
1815 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
1816 u64 *delalloc_start_ret, u64 *delalloc_end_ret);
1819 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
1820 struct btrfs_root *root);
1823 int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
1824 unsigned long new_flags);
1825 int btrfs_sync_fs(struct super_block *sb, int wait);
1826 char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1827 u64 subvol_objectid);
1829 #if BITS_PER_LONG == 32
1830 #define BTRFS_32BIT_MAX_FILE_SIZE (((u64)ULONG_MAX + 1) << PAGE_SHIFT)
1832 * The warning threshold is 5/8th of the MAX_LFS_FILESIZE that limits the logical
1833 * addresses of extents.
1835 * For 4K page size it's about 10T, for 64K it's 160T.
1837 #define BTRFS_32BIT_EARLY_WARN_THRESHOLD (BTRFS_32BIT_MAX_FILE_SIZE * 5 / 8)
1838 void btrfs_warn_32bit_limit(struct btrfs_fs_info *fs_info);
1839 void btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info);
1843 * Get the correct offset inside the page of extent buffer.
1845 * @eb: target extent buffer
1846 * @start: offset inside the extent buffer
1848 * Will handle both sectorsize == PAGE_SIZE and sectorsize < PAGE_SIZE cases.
1850 static inline size_t get_eb_offset_in_page(const struct extent_buffer *eb,
1851 unsigned long offset)
1854 * For sectorsize == PAGE_SIZE case, eb->start will always be aligned
1855 * to PAGE_SIZE, thus adding it won't cause any difference.
1857 * For sectorsize < PAGE_SIZE, we must only read the data that belongs
1858 * to the eb, thus we have to take the eb->start into consideration.
1860 return offset_in_page(offset + eb->start);
1863 static inline unsigned long get_eb_page_index(unsigned long offset)
1866 * For sectorsize == PAGE_SIZE case, plain >> PAGE_SHIFT is enough.
1868 * For sectorsize < PAGE_SIZE case, we only support 64K PAGE_SIZE,
1869 * and have ensured that all tree blocks are contained in one page,
1870 * thus we always get index == 0.
1872 return offset >> PAGE_SHIFT;
1876 * Use that for functions that are conditionally exported for sanity tests but
1879 #ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1880 #define EXPORT_FOR_TESTS static
1882 #define EXPORT_FOR_TESTS
1886 #ifdef CONFIG_BTRFS_FS_POSIX_ACL
1887 struct posix_acl *btrfs_get_acl(struct inode *inode, int type, bool rcu);
1888 int btrfs_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
1889 struct posix_acl *acl, int type);
1890 int __btrfs_set_acl(struct btrfs_trans_handle *trans, struct inode *inode,
1891 struct posix_acl *acl, int type);
1893 #define btrfs_get_acl NULL
1894 #define btrfs_set_acl NULL
1895 static inline int __btrfs_set_acl(struct btrfs_trans_handle *trans,
1896 struct inode *inode, struct posix_acl *acl,
/* relocation.c */
/* Relocate the block group starting at @group_start. */
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
/* Resume/clean up relocation after an unclean shutdown. */
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
int btrfs_reloc_clone_csums(struct btrfs_inode *inode, u64 file_pos, u64 len);
/* Hook called when a tree block is COWed while relocation is running. */
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending);
int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
/*
 * NOTE(review): the continuation line of this declaration (likely
 * "u64 bytenr);") is missing from this excerpt — confirm upstream.
 */
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info,
int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
/* scrub.c */
/*
 * Start a scrub of device @devid over the byte range [start, end];
 * progress/statistics are reported through @progress.
 */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
		    u64 end, struct btrfs_scrub_progress *progress,
		    int readonly, int is_dev_replace);
/* Pause and resume scrubbing filesystem-wide. */
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
/* Cancel all scrubs, or only those on one device. */
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_device *dev);
/* Copy the current scrub progress for @devid into @progress. */
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
			 struct btrfs_scrub_progress *progress);
/*
 * Filesystem-wide in-flight bio counter.  NOTE(review): presumably used to
 * hold off operations such as device replace while bios are in flight —
 * confirm against the implementation (dev-replace.c/volumes.c).
 */
void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
/* Drop exactly one reference from the fs-wide bio counter. */
static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
{
	btrfs_bio_counter_sub(fs_info, 1);
}
1943 static inline int is_fstree(u64 rootid)
1945 if (rootid == BTRFS_FS_TREE_OBJECTID ||
1946 ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
1947 !btrfs_qgroup_level(rootid)))
1952 static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
1954 return signal_pending(current);
#ifdef CONFIG_FS_VERITY

/* fs-verity support: operations table and item helpers. */
extern const struct fsverity_operations btrfs_verityops;
int btrfs_drop_verity_items(struct btrfs_inode *inode);
int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size);

/*
 * NOTE(review): the "#else" marker, the bodies of the two no-op stubs below,
 * the tail of the second stub's parameter list, and the closing "#endif" are
 * all missing from this excerpt — confirm against upstream.
 */
static inline int btrfs_drop_verity_items(struct btrfs_inode *inode)

static inline int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
/* Sanity test specific functions */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Tear down an inode created by the sanity tests. */
void btrfs_test_destroy_inode(struct inode *inode);
/*
 * NOTE(review): the matching "#endif" (and any further test declarations)
 * is missing from this excerpt — confirm against upstream.
 */
1984 static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
1986 return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO in the range (NOTE(review): the middle of this sentence was
 * lost from this excerpt and reconstructed — confirm wording upstream).
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page) PagePrivate2(page)
#define SetPageOrdered(page) SetPagePrivate2(page)
#define ClearPageOrdered(page) ClearPagePrivate2(page)
#define folio_test_ordered(folio) folio_test_private_2(folio)
#define folio_set_ordered(folio) folio_set_private_2(folio)
#define folio_clear_ordered(folio) folio_clear_private_2(folio)