/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_BLOCK_GROUP_H
#define BTRFS_BLOCK_GROUP_H

#include "free-space-cache.h"

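/*
 * State of the on-disk free space cache for a block group, tracked in the
 * disk_cache_state member of struct btrfs_block_group below.
 */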
enum btrfs_disk_cache_state {
	BTRFS_DC_WRITTEN,
	BTRFS_DC_ERROR,
	BTRFS_DC_CLEAR,
	BTRFS_DC_SETUP,
};

/*
 * This describes the state of the block_group for async discard. Async
 * discard is done in two passes, with extent discarding prioritized over
 * bitmap discarding. BTRFS_DISCARD_RESET_CURSOR is set when we are resetting
 * between lists to prevent contention for discard state variables
 * (e.g. discard_cursor).
 */
enum btrfs_discard_state {
	BTRFS_DISCARD_EXTENTS,
	BTRFS_DISCARD_BITMAPS,
	BTRFS_DISCARD_RESET_CURSOR,
};

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one if we have very few
 * chunks already allocated. This is used as part of the clustering code to
 * help make sure we have a good pool of storage to cluster in, without filling
 * the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
 * find_free_extent(), which also activates the zone.
 */
enum btrfs_chunk_alloc_enum {
	CHUNK_ALLOC_NO_FORCE,
	CHUNK_ALLOC_LIMITED,
	CHUNK_ALLOC_FORCE,
	CHUNK_ALLOC_FORCE_FOR_EXTENT,
};

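/*
 * Illustrative example (not from the original source): ask for a new metadata
 * chunk only if one is actually needed:
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_metadata_alloc_profile(fs_info),
 *				CHUNK_ALLOC_NO_FORCE);
 *
 * btrfs_chunk_alloc() and btrfs_metadata_alloc_profile() are declared later
 * in this header.
 */
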
/* Block group flags set at runtime */
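/* These are bit numbers, used with the bit helpers on runtime_flags below. */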
enum btrfs_block_group_flags {
	BLOCK_GROUP_FLAG_IREF,
	BLOCK_GROUP_FLAG_REMOVED,
	BLOCK_GROUP_FLAG_TO_COPY,
	BLOCK_GROUP_FLAG_RELOCATING_REPAIR,
	BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
	BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
	BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
};

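/*
 * Progress of loading a block group's free space into memory, tracked in the
 * cached member of struct btrfs_block_group below.
 */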
enum btrfs_caching_type {
	BTRFS_CACHE_NO,
	BTRFS_CACHE_STARTED,
	BTRFS_CACHE_FINISHED,
	BTRFS_CACHE_ERROR,
};

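/*
 * Tracks an in-progress caching run for a block group; reference counted via
 * btrfs_get_caching_control() and btrfs_put_caching_control() declared below.
 */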
struct btrfs_caching_control {
	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
	struct btrfs_work work;
	struct btrfs_block_group *block_group;
	refcount_t count;
};

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M

/*
 * Tree to record all locked full stripes of a RAID5/6 block group
 */
struct btrfs_full_stripe_locks_tree {
	struct rb_root root;
	struct mutex lock;
};

struct btrfs_block_group {
	struct btrfs_fs_info *fs_info;
	struct inode *inode;
	spinlock_t lock;
	u64 start;
	u64 length;
	u64 pinned;
	u64 reserved;
	u64 used;
	u64 delalloc_bytes;
	u64 bytes_super;
	u64 flags;
	u64 cache_generation;
	u64 global_root_id;

	/*
	 * If the free space extent count exceeds this number, convert the block
	 * group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	/*
	 * Used only for the delayed data space allocation, because only the
	 * data space allocation and the related metadata update can be done
	 * across transactions.
	 */
	struct rw_semaphore data_rwsem;

	/* For raid56, this is a full stripe, without parity */
	unsigned long full_stripe_len;
	unsigned long runtime_flags;

	unsigned int ro;

	int disk_cache_state;

	/* Cache tracking stuff */
	int cached;
	struct btrfs_caching_control *caching_ctl;

	struct btrfs_space_info *space_info;

	/* Free space cache stuff */
	struct btrfs_free_space_ctl *free_space_ctl;

	/* Block group cache stuff */
	struct rb_node cache_node;

	/* For block groups in the same raid type */
	struct list_head list;

	refcount_t refs;

	/*
	 * List of struct btrfs_free_clusters for this block group.
	 * Today it will only have one thing on it, but that may change
	 */
	struct list_head cluster_list;

	/* For delayed block group creation or deletion of empty block groups */
	struct list_head bg_list;

	/* For read-only block groups */
	struct list_head ro_list;

	/*
	 * When non-zero it means the block group's logical address and its
	 * device extents cannot be reused for future block group allocations
	 * until the counter goes down to 0. This is to prevent them from being
	 * reused while some task is still using the block group after it was
	 * deleted - we want to make sure they can only be reused for new block
	 * groups after that task is done with the deleted block group.
	 */
	atomic_t frozen;
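	/*
	 * frozen is incremented by btrfs_freeze_block_group() and decremented
	 * by btrfs_unfreeze_block_group(), both declared at the end of this
	 * header.
	 */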

	/* For discard operations */
	struct list_head discard_list;
	int discard_index;
	u64 discard_eligible_time;
	u64 discard_cursor;
	enum btrfs_discard_state discard_state;

	/* For dirty block groups */
	struct list_head dirty_list;
	struct list_head io_list;

	struct btrfs_io_ctl io_ctl;

	/*
	 * Incremented when doing extent allocations and holding a read lock
	 * on the space_info's groups_sem semaphore.
	 * Decremented when an ordered extent that represents an IO against this
	 * block group's range is created (after it's added to its inode's
	 * root's list of ordered extents) or immediately after the allocation
	 * if it's a metadata extent or fallocate extent (for these cases we
	 * don't create ordered extents).
	 */
	atomic_t reservations;
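	/*
	 * reservations is dropped via btrfs_dec_block_group_reservations() and
	 * waited on by btrfs_wait_block_group_reservations(), declared below.
	 */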

	/*
	 * Incremented while holding the spinlock *lock* by a task checking if
	 * it can perform a nocow write (incremented if the value for the *ro*
	 * field is 0). Decremented by such tasks once they create an ordered
	 * extent or before that if some error happens before reaching that step.
	 * This is to prevent races between block group relocation and nocow
	 * writes through direct IO.
	 */
	atomic_t nocow_writers;
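	/*
	 * nocow_writers is managed through btrfs_inc_nocow_writers(),
	 * btrfs_dec_nocow_writers() and btrfs_wait_nocow_writers(), declared
	 * below.
	 */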

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

	/* Flag indicating this block group is placed on a sequential zone */
	bool seq_zone;

	/*
	 * Number of extents in this block group used for swap files.
	 * All accesses protected by the spinlock 'lock'.
	 */
	int swap_extents;
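	/*
	 * swap_extents is updated via btrfs_inc_block_group_swap_extents() and
	 * btrfs_dec_block_group_swap_extents(), declared at the end of this
	 * header.
	 */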

	/* Record locked full stripes for RAID5/6 block group */
	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;

	/*
	 * Allocation offset for the block group to implement sequential
	 * allocation. This is used only on a zoned filesystem.
	 */
	u64 alloc_offset;
	u64 zone_unusable;
	u64 zone_capacity;
	u64 meta_write_pointer;
	struct map_lookup *physical_map;
	struct list_head active_bg_list;
	struct work_struct zone_finish_work;
	struct extent_buffer *last_eb;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
{
	return (block_group->start + block_group->length);
}

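/*
 * Usage sketch (illustrative only): a logical address bytenr lies inside a
 * block group bg when
 *
 *	bytenr >= bg->start && bytenr < btrfs_block_group_end(bg)
 */
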
static inline bool btrfs_is_block_group_data_only(
					struct btrfs_block_group *block_group)
{
	/*
	 * In mixed mode the fragmentation is expected to be high, lowering the
	 * efficiency, so only proper data block groups are considered.
	 */
	return (block_group->flags & BTRFS_BLOCK_GROUP_DATA) &&
	       !(block_group->flags & BTRFS_BLOCK_GROUP_METADATA);
}

#ifdef CONFIG_BTRFS_DEBUG
static inline int btrfs_should_fragment_free_space(
		struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr);
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache);
void btrfs_get_block_group(struct btrfs_block_group *cache);
void btrfs_put_block_group(struct btrfs_block_group *cache);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr);
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes);
int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait);
void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache);
u64 add_new_free_space(struct btrfs_block_group *block_group,
		       u64 start, u64 end);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
				struct btrfs_fs_info *fs_info,
				const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
void btrfs_reclaim_bgs_work(struct work_struct *work);
void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
						 u64 bytes_used, u64 type,
						 u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc);
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, bool alloc);
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc);
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc);
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type);
void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
				  bool is_item_insertion);
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     struct block_device *bdev, u64 physical, u64 **logical,
		     int *naddrs, int *stripe_len);

static inline u64 btrfs_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}

static inline u64 btrfs_metadata_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA);
}

static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
{
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
}

static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
	       cache->cached == BTRFS_CACHE_ERROR;
}

void btrfs_freeze_block_group(struct btrfs_block_group *cache);
void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);

#endif /* BTRFS_BLOCK_GROUP_H */