// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;

        return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
                block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
               (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
                block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress.
 *
 * Should be called with balance_lock held.
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
        struct btrfs_balance_control *bctl = fs_info->balance_ctl;
        u64 target = 0;

        if (!bctl)
                return 0;

        if (flags & BTRFS_BLOCK_GROUP_DATA &&
            bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
        } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
                   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
        } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
                   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
                target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
        }

        return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return the reduced profile in chunk format. If a profile change is in
 * progress (either running or paused), pick the target profile if it's
 * already available, otherwise fall back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 num_devices = fs_info->fs_devices->rw_devices;
        u64 target;
        u64 raid_type;
        u64 allowed = 0;

        /*
         * See if restripe for this chunk_type is in progress, if so try to
         * reduce to the target profile.
         */
        spin_lock(&fs_info->balance_lock);
        target = get_restripe_target(fs_info, flags);
        if (target) {
                spin_unlock(&fs_info->balance_lock);
                return extended_to_chunk(target);
        }
        spin_unlock(&fs_info->balance_lock);

        /* First, mask out the RAID levels which aren't possible */
        for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
                if (num_devices >= btrfs_raid_array[raid_type].devs_min)
                        allowed |= btrfs_raid_array[raid_type].bg_flag;
        }
        allowed &= flags;

        /* Select the highest-redundancy RAID level. */
        if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
                allowed = BTRFS_BLOCK_GROUP_RAID1C4;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
                allowed = BTRFS_BLOCK_GROUP_RAID6;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
                allowed = BTRFS_BLOCK_GROUP_RAID1C3;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
                allowed = BTRFS_BLOCK_GROUP_RAID5;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
                allowed = BTRFS_BLOCK_GROUP_RAID10;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
                allowed = BTRFS_BLOCK_GROUP_RAID1;
        else if (allowed & BTRFS_BLOCK_GROUP_DUP)
                allowed = BTRFS_BLOCK_GROUP_DUP;
        else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
                allowed = BTRFS_BLOCK_GROUP_RAID0;

        flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

        return extended_to_chunk(flags | allowed);
}

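/*
 * Illustrative walk-through of the reduction above (values assumed, not taken
 * from a real filesystem, and assuming no balance/restripe is in progress):
 * with two rw devices and extended flags allowing RAID1C4 | RAID6 | RAID1 |
 * RAID0, the devs_min mask drops RAID1C4 and RAID6 (they need at least four
 * and three devices respectively), and the highest-redundancy survivor,
 * RAID1, is what gets returned in chunk format:
 *
 *      flags = BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1C4 |
 *              BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID1 |
 *              BTRFS_BLOCK_GROUP_RAID0;
 *      btrfs_reduce_alloc_profile(fs_info, flags)
 *              == extended_to_chunk(BTRFS_BLOCK_GROUP_DATA |
 *                                   BTRFS_BLOCK_GROUP_RAID1);
 */
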
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
        unsigned seq;
        u64 flags;

        do {
                flags = orig_flags;
                seq = read_seqbegin(&fs_info->profiles_lock);

                if (flags & BTRFS_BLOCK_GROUP_DATA)
                        flags |= fs_info->avail_data_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                        flags |= fs_info->avail_system_alloc_bits;
                else if (flags & BTRFS_BLOCK_GROUP_METADATA)
                        flags |= fs_info->avail_metadata_alloc_bits;
        } while (read_seqretry(&fs_info->profiles_lock, seq));

        return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
        refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
        if (refcount_dec_and_test(&cache->refs)) {
                WARN_ON(cache->pinned > 0);
                /*
                 * If there was a failure to cleanup a log tree, very likely due
                 * to an IO failure on a writeback attempt of one or more of its
                 * extent buffers, we could not do proper (and cheap) unaccounting
                 * of their reserved space, so don't warn on reserved > 0 in that
                 * case.
                 */
                if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
                    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
                        WARN_ON(cache->reserved > 0);

                /*
                 * A block_group shouldn't be on the discard_list anymore.
                 * Remove the block_group from the discard_list to prevent us
                 * from causing a panic due to NULL pointer dereference.
                 */
                if (WARN_ON(!list_empty(&cache->discard_list)))
                        btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
                                                  cache);

                kfree(cache->free_space_ctl);
                kfree(cache->physical_map);
                kfree(cache);
        }
}

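/*
 * Reference counting sketch (illustrative caller, values assumed): every
 * lookup that returns a block group takes a reference, so the usual pattern
 * pairs the lookup with btrfs_put_block_group():
 *
 *      bg = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (!bg)
 *              return;
 *      ... use bg ...
 *      btrfs_put_block_group(bg);
 */
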
/*
 * This adds the block group to the fs_info rb tree for the block group cache.
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                       struct btrfs_block_group *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group *cache;
        bool leftmost = true;

        ASSERT(block_group->length != 0);

        write_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_root.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group, cache_node);
                if (block_group->start < cache->start) {
                        p = &(*p)->rb_left;
                } else if (block_group->start > cache->start) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        write_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color_cached(&block_group->cache_node,
                               &info->block_group_cache_tree, leftmost);

        write_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr.
 */
static struct btrfs_block_group *block_group_cache_tree_search(
                struct btrfs_fs_info *info, u64 bytenr, int contains)
{
        struct btrfs_block_group *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        read_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_root.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group, cache_node);
                end = cache->start + cache->length - 1;
                start = cache->start;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->start))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        read_unlock(&info->block_group_cache_lock);

        return ret;
}

/*
 * Return the block group that starts at or after bytenr.
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
                struct btrfs_fs_info *info, u64 bytenr)
{
        return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr.
 */
struct btrfs_block_group *btrfs_lookup_block_group(
                struct btrfs_fs_info *info, u64 bytenr)
{
        return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
                struct btrfs_block_group *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct rb_node *node;

        read_lock(&fs_info->block_group_cache_lock);

        /* If our block group was removed, we need a full search. */
        if (RB_EMPTY_NODE(&cache->cache_node)) {
                const u64 next_bytenr = cache->start + cache->length;

                read_unlock(&fs_info->block_group_cache_lock);
                btrfs_put_block_group(cache);
                return btrfs_lookup_first_block_group(fs_info, next_bytenr);
        }
        node = rb_next(&cache->cache_node);
        btrfs_put_block_group(cache);
        if (node) {
                cache = rb_entry(node, struct btrfs_block_group, cache_node);
                btrfs_get_block_group(cache);
        } else
                cache = NULL;
        read_unlock(&fs_info->block_group_cache_lock);
        return cache;
}

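/*
 * Iteration sketch (illustrative caller, not a definitive pattern): walking
 * all block groups combines btrfs_lookup_first_block_group() with
 * btrfs_next_block_group(), which drops the reference on the group it was
 * handed and returns the next group already referenced:
 *
 *      struct btrfs_block_group *bg;
 *
 *      for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *           bg = btrfs_next_block_group(bg)) {
 *              ... use bg; a put is only needed when breaking out early ...
 *      }
 */
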
/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:       The filesystem information object.
 * @bytenr:        Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write; the
 * caller is responsible for calling btrfs_dec_nocow_writers() later.
 *
 * Or NULL if we cannot do a NOCOW write.
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
                                                  u64 bytenr)
{
        struct btrfs_block_group *bg;
        bool can_nocow = true;

        bg = btrfs_lookup_block_group(fs_info, bytenr);
        if (!bg)
                return NULL;

        spin_lock(&bg->lock);
        if (bg->ro)
                can_nocow = false;
        else
                atomic_inc(&bg->nocow_writers);
        spin_unlock(&bg->lock);

        if (!can_nocow) {
                btrfs_put_block_group(bg);
                return NULL;
        }

        /* No put on block group, done by btrfs_dec_nocow_writers(). */
        return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
        if (atomic_dec_and_test(&bg->nocow_writers))
                wake_up_var(&bg->nocow_writers);

        /* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
        btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
        wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

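/*
 * Usage sketch (simplified and illustrative; fall_back_to_cow() is a
 * hypothetical stand-in for the real COW path): the inc/dec pair brackets
 * creation of the ordered extent so that scrub and relocation can wait out
 * in-flight NOCOW writes:
 *
 *      bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *      if (!bg)
 *              return fall_back_to_cow();
 *      ... create the ordered extent for the NOCOW write ...
 *      btrfs_dec_nocow_writers(bg);
 */
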
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
                                        const u64 start)
{
        struct btrfs_block_group *bg;

        bg = btrfs_lookup_block_group(fs_info, start);
        ASSERT(bg);
        if (atomic_dec_and_test(&bg->reservations))
                wake_up_var(&bg->reservations);
        btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
        struct btrfs_space_info *space_info = bg->space_info;

        ASSERT(bg->ro);

        if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
                return;

        /*
         * Our block group is read only but before we set it to read only,
         * some task might have allocated an extent from it already, but it
         * has not yet created a respective ordered extent (and added it to a
         * root's list of ordered extents).
         * Therefore wait for any task currently allocating extents, since the
         * block group's reservations counter is incremented while a read lock
         * on the groups' semaphore is held and decremented after releasing
         * the read access on that semaphore and creating the ordered extent.
         */
        down_write(&space_info->groups_sem);
        up_write(&space_info->groups_sem);

        wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
                struct btrfs_block_group *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        refcount_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
        if (refcount_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
                                           u64 num_bytes)
{
        struct btrfs_caching_control *caching_ctl;
        int progress;

        caching_ctl = btrfs_get_caching_control(cache);
        if (!caching_ctl)
                return;

        /*
         * We've already failed to allocate from this block group, so even if
         * there's enough space in the block group it isn't contiguous enough to
         * allow for an allocation, so wait for at least the next wakeup tick,
         * or for the thing to be done.
         */
        progress = atomic_read(&caching_ctl->progress);

        wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
                   (progress != atomic_read(&caching_ctl->progress) &&
                    (cache->free_space_ctl->free_space >= num_bytes)));

        btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
                                       struct btrfs_caching_control *caching_ctl)
{
        wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
        return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
        struct btrfs_caching_control *caching_ctl;
        int ret;

        caching_ctl = btrfs_get_caching_control(cache);
        if (!caching_ctl)
                return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
        ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
        btrfs_put_caching_control(caching_ctl);
        return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 start = block_group->start;
        u64 len = block_group->length;
        u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
                fs_info->nodesize : fs_info->sectorsize;
        u64 step = chunk << 1;

        while (len > chunk) {
                btrfs_remove_free_space(block_group, start, chunk);
                start += step;
                if (len < step)
                        len = 0;
                else
                        len -= step;
        }
}
#endif

/*
 * Add a free space range to the in memory free space cache of a block group.
 * This checks if the range contains super block locations and any such
 * locations are not added to the free space cache.
 *
 * @block_group:      The target block group.
 * @start:            Start offset of the range.
 * @end:              End offset of the range (exclusive).
 * @total_added_ret:  Optional pointer to return the total amount of space
 *                    added to the block group's free space cache.
 *
 * Returns 0 on success or < 0 on error.
 */
int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
                             u64 end, u64 *total_added_ret)
{
        struct btrfs_fs_info *info = block_group->fs_info;
        u64 extent_start, extent_end, size;
        int ret;

        if (total_added_ret)
                *total_added_ret = 0;

        while (start < end) {
                if (!find_first_extent_bit(&info->excluded_extents, start,
                                           &extent_start, &extent_end,
                                           EXTENT_DIRTY | EXTENT_UPTODATE,
                                           NULL))
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        ret = btrfs_add_free_space_async_trimmed(block_group,
                                                                 start, size);
                        if (ret)
                                return ret;
                        if (total_added_ret)
                                *total_added_ret += size;
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                ret = btrfs_add_free_space_async_trimmed(block_group, start,
                                                         size);
                if (ret)
                        return ret;
                if (total_added_ret)
                        *total_added_ret += size;
        }

        return 0;
}

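/*
 * Worked example (layout assumed for illustration): for a block group spanning
 * [0, 256M) where the primary super block copy at BTRFS_SUPER_INFO_OFFSET
 * (64M) is recorded in excluded_extents, the call
 *
 *      btrfs_add_new_free_space(block_group, 0, SZ_256M, &added);
 *
 * adds [0, 64M) and [end of the excluded range, 256M) to the free space cache
 * and returns the sum of the two sizes in "added"; the excluded super block
 * range itself is skipped.
 */
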
/*
 * Get an arbitrary extent item index / max_index through the block group.
 *
 * @block_group:  the block group to sample from
 * @index:        the integral step through the block group to grab from
 * @max_index:    the granularity of the sampling
 * @found_key:    return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
                                          struct btrfs_block_group *block_group,
                                          int index, int max_index,
                                          struct btrfs_key *found_key)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_root *extent_root;
        u64 search_offset;
        u64 search_end = block_group->start + block_group->length;
        struct btrfs_path *path;
        struct btrfs_key search_key;
        int ret = 0;

        ASSERT(index >= 0);
        ASSERT(index <= max_index);
        ASSERT(max_index > 0);
        lockdep_assert_held(&caching_ctl->mutex);
        lockdep_assert_held_read(&fs_info->commit_root_sem);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
                                                       BTRFS_SUPER_INFO_OFFSET));

        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        search_offset = index * div_u64(block_group->length, max_index);
        search_key.objectid = block_group->start + search_offset;
        search_key.type = BTRFS_EXTENT_ITEM_KEY;
        search_key.offset = 0;

        btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
                /* Success; sampled an extent item in the block group */
                if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
                    found_key->objectid >= block_group->start &&
                    found_key->objectid + found_key->offset <= search_end)
                        break;

                /* We can't possibly find a valid extent item anymore */
                if (found_key->objectid >= search_end) {
                        ret = 1;
                        break;
                }
        }

        lockdep_assert_held(&caching_ctl->mutex);
        lockdep_assert_held_read(&fs_info->commit_root_sem);
        btrfs_free_path(path);
        return ret;
}

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable to the size class. For
 * 2., a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
                                       struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_key key;
        int i;
        u64 min_size = block_group->length;
        enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
        int ret;

        if (!btrfs_block_group_should_use_size_class(block_group))
                return 0;

        lockdep_assert_held(&caching_ctl->mutex);
        lockdep_assert_held_read(&fs_info->commit_root_sem);
        for (i = 0; i < 5; ++i) {
                ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
                if (ret < 0)
                        goto out;
                if (ret > 0)
                        continue;
                min_size = min_t(u64, min_size, key.offset);
                size_class = btrfs_calc_block_group_size_class(min_size);
        }
        if (size_class != BTRFS_BG_SZ_NONE) {
                spin_lock(&block_group->lock);
                block_group->size_class = size_class;
                spin_unlock(&block_group->lock);
        }
out:
        return ret;
}

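/*
 * Sampling example (values assumed for illustration): the loop above probes
 * the extent tree at 0/5, 1/5, 2/5, 3/5 and 4/5 of the way through the block
 * group. If the sampled extent items have sizes 8M, 8M, 64K, 8M and 8M, then
 * min_size ends up 64K and the stored class is whatever
 * btrfs_calc_block_group_size_class() maps 64K to - the smallest size class
 * seen wins, by design.
 */
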
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
        struct btrfs_block_group *block_group = caching_ctl->block_group;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret;
        bool wakeup = true;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
        extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
        /*
         * If we're fragmenting we don't want to make anybody think we can
         * allocate from this block group until we've had a chance to fragment
         * the free space.
         */
        if (btrfs_should_fragment_free_space(block_group))
                wakeup = false;
#endif
        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space. So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = READA_FORWARD;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                mutex_lock(&caching_ctl->mutex);
                                down_read(&fs_info->commit_root_sem);
                                goto next;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto out;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->start) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->start + block_group->length)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        u64 space_added;

                        ret = btrfs_add_new_free_space(block_group, last,
                                                       key.objectid, &space_added);
                        if (ret)
                                goto out;
                        total_found += space_added;
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > CACHING_CTL_WAKE_UP) {
                                total_found = 0;
                                if (wakeup) {
                                        atomic_inc(&caching_ctl->progress);
                                        wake_up(&caching_ctl->wait);
                                }
                        }
                }
                path->slots[0]++;
        }

        ret = btrfs_add_new_free_space(block_group, last,
                                       block_group->start + block_group->length,
                                       NULL);
out:
        btrfs_free_path(path);
        return ret;
}

static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
        clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
                          bg->start + bg->length - 1, EXTENT_UPTODATE);
}

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;

        mutex_lock(&caching_ctl->mutex);
        down_read(&fs_info->commit_root_sem);

        load_block_group_size_class(caching_ctl, block_group);
        if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
                ret = load_free_space_cache(block_group);
                if (ret == 1) {
                        ret = 0;
                        goto done;
                }

                /*
                 * We failed to load the space cache, set ourselves to
                 * CACHE_STARTED and carry on.
                 */
                spin_lock(&block_group->lock);
                block_group->cached = BTRFS_CACHE_STARTED;
                spin_unlock(&block_group->lock);
                wake_up(&caching_ctl->wait);
        }

        /*
         * If we are in the transaction that populated the free space tree we
         * can't actually cache from the free space tree as our commit root and
         * real root are the same, so we could change the contents of the blocks
         * while caching. Instead do the slow caching in this case, and after
         * the transaction has committed we will be safe.
         */
        if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
            !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
                ret = load_free_space_tree(caching_ctl);
        else
                ret = load_extent_tree_free(caching_ctl);
done:
        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
        if (btrfs_should_fragment_free_space(block_group)) {
                u64 bytes_used;

                spin_lock(&block_group->space_info->lock);
                spin_lock(&block_group->lock);
                bytes_used = block_group->length - block_group->used;
                block_group->space_info->bytes_used += bytes_used >> 1;
                spin_unlock(&block_group->lock);
                spin_unlock(&block_group->space_info->lock);
                fragment_free_space(block_group);
        }
#endif

        up_read(&fs_info->commit_root_sem);
        btrfs_free_excluded_extents(block_group);
        mutex_unlock(&caching_ctl->mutex);

        wake_up(&caching_ctl->wait);

        btrfs_put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl = NULL;
        int ret = 0;

        /* Allocator for zoned filesystems does not use the cache at all */
        if (btrfs_is_zoned(fs_info))
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        refcount_set(&caching_ctl->count, 2);
        atomic_set(&caching_ctl->progress, 0);
        btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                kfree(caching_ctl);

                caching_ctl = cache->caching_ctl;
                if (caching_ctl)
                        refcount_inc(&caching_ctl->count);
                spin_unlock(&cache->lock);
                goto out;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        write_lock(&fs_info->block_group_cache_lock);
        refcount_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        write_unlock(&fs_info->block_group_cache_lock);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
        if (wait && caching_ctl)
                ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
        if (caching_ctl)
                btrfs_put_caching_control(caching_ctl);

        return ret;
}

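/*
 * Usage sketch (illustrative caller, not a definitive pattern): callers that
 * need the free space information immediately pass wait == true and get -EIO
 * back if caching ended in BTRFS_CACHE_ERROR:
 *
 *      ret = btrfs_cache_block_group(cache, true);
 *      if (ret)
 *              return ret;
 *      ... cache's free space is now fully loaded ...
 */
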
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        u64 extra_flags = chunk_to_extended(flags) &
                                BTRFS_EXTENDED_PROFILE_MASK;

        write_seqlock(&fs_info->profiles_lock);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits &= ~extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits &= ~extra_flags;
        write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
        bool found_raid56 = false;
        bool found_raid1c34 = false;

        if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
            (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
            (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
                struct list_head *head = &fs_info->space_info;
                struct btrfs_space_info *sinfo;

                list_for_each_entry_rcu(sinfo, head, list) {
                        down_read(&sinfo->groups_sem);
                        if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
                                found_raid56 = true;
                        if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
                                found_raid56 = true;
                        if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
                                found_raid1c34 = true;
                        if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
                                found_raid1c34 = true;
                        up_read(&sinfo->groups_sem);
                }
                if (!found_raid56)
                        btrfs_clear_fs_incompat(fs_info, RAID56);
                if (!found_raid1c34)
                        btrfs_clear_fs_incompat(fs_info, RAID1C34);
        }
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
                                   struct btrfs_path *path,
                                   struct btrfs_block_group *block_group)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root;
        struct btrfs_key key;
        int ret;

        root = btrfs_block_group_root(fs_info);
        key.objectid = block_group->start;
        key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        key.offset = block_group->length;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
        if (ret < 0)
                return ret;

        ret = btrfs_del_item(trans, root, path);
        return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             u64 group_start, struct extent_map *em)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_path *path;
        struct btrfs_block_group *block_group;
        struct btrfs_free_cluster *cluster;
        struct inode *inode;
        struct kobject *kobj = NULL;
        int ret;
        int index;
        int factor;
        struct btrfs_caching_control *caching_ctl = NULL;
        bool remove_em;
        bool remove_rsv = false;

        block_group = btrfs_lookup_block_group(fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        trace_btrfs_remove_block_group(block_group);
        /*
         * Free the reserved super bytes from this block group before
         * removing it.
         */
        btrfs_free_excluded_extents(block_group);
        btrfs_free_ref_tree_range(fs_info, block_group->start,
                                  block_group->length);

        index = btrfs_bg_flags_to_raid_index(block_group->flags);
        factor = btrfs_bg_type_to_factor(block_group->flags);

        /* Make sure this block group isn't part of an allocation cluster */
        cluster = &fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * Make sure this block group isn't part of a metadata
         * allocation cluster.
         */
        cluster = &fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        btrfs_clear_treelog_bg(block_group);
        btrfs_clear_data_reloc_bg(block_group);

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Get the inode first so any iput calls done for the io_list
         * aren't the final iput (no unlinks allowed now).
         */
        inode = lookup_free_space_inode(block_group, path);

        mutex_lock(&trans->transaction->cache_write_mutex);
        /*
         * Make sure our free space cache IO is done before removing the
         * free space inode.
         */
        spin_lock(&trans->transaction->dirty_bgs_lock);
        if (!list_empty(&block_group->io_list)) {
                list_del_init(&block_group->io_list);

                WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

                spin_unlock(&trans->transaction->dirty_bgs_lock);
                btrfs_wait_cache_io(trans, block_group, path);
                btrfs_put_block_group(block_group);
                spin_lock(&trans->transaction->dirty_bgs_lock);
        }

        if (!list_empty(&block_group->dirty_list)) {
                list_del_init(&block_group->dirty_list);
                remove_rsv = true;
                btrfs_put_block_group(block_group);
        }
        spin_unlock(&trans->transaction->dirty_bgs_lock);
        mutex_unlock(&trans->transaction->cache_write_mutex);

        ret = btrfs_remove_free_space_inode(trans, inode, block_group);
        if (ret)
                goto out;

        write_lock(&fs_info->block_group_cache_lock);
        rb_erase_cached(&block_group->cache_node,
                        &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);

        /* Once for the block groups rbtree */
        btrfs_put_block_group(block_group);

        write_unlock(&fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * We must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore.
         */
        list_del_init(&block_group->list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
                clear_avail_alloc_bits(fs_info, block_group->flags);
        }
        up_write(&block_group->space_info->groups_sem);
        clear_incompat_bg_bits(fs_info, block_group->flags);
        if (kobj) {
                kobject_del(kobj);
                kobject_put(kobj);
        }

        if (block_group->cached == BTRFS_CACHE_STARTED)
                btrfs_wait_block_group_cache_done(block_group);

        write_lock(&fs_info->block_group_cache_lock);
        caching_ctl = btrfs_get_caching_control(block_group);
        if (!caching_ctl) {
                struct btrfs_caching_control *ctl;

                list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
                        if (ctl->block_group == block_group) {
                                caching_ctl = ctl;
                                refcount_inc(&caching_ctl->count);
                                break;
                        }
                }
        }
        if (caching_ctl)
                list_del_init(&caching_ctl->list);
        write_unlock(&fs_info->block_group_cache_lock);

        if (caching_ctl) {
                /* Once for the caching bgs list and once for us. */
                btrfs_put_caching_control(caching_ctl);
                btrfs_put_caching_control(caching_ctl);
        }

        spin_lock(&trans->transaction->dirty_bgs_lock);
        WARN_ON(!list_empty(&block_group->dirty_list));
        WARN_ON(!list_empty(&block_group->io_list));
        spin_unlock(&trans->transaction->dirty_bgs_lock);

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        list_del_init(&block_group->ro_list);

        if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
                WARN_ON(block_group->space_info->total_bytes
                        < block_group->length);
                WARN_ON(block_group->space_info->bytes_readonly
                        < block_group->length - block_group->zone_unusable);
                WARN_ON(block_group->space_info->bytes_zone_unusable
                        < block_group->zone_unusable);
                WARN_ON(block_group->space_info->disk_total
                        < block_group->length * factor);
        }
        block_group->space_info->total_bytes -= block_group->length;
        block_group->space_info->bytes_readonly -=
                (block_group->length - block_group->zone_unusable);
        block_group->space_info->bytes_zone_unusable -=
                block_group->zone_unusable;
        block_group->space_info->disk_total -= block_group->length * factor;

        spin_unlock(&block_group->space_info->lock);

        /*
         * Remove the free space for the block group from the free space tree
         * and the block group's item from the extent tree before marking the
         * block group as removed. This is to prevent races with tasks that
         * freeze and unfreeze a block group, this task and another task
         * allocating a new block group - the unfreeze task ends up removing
         * the block group's extent map before the task calling this function
         * deletes the block group item from the extent tree, allowing for
         * another task to attempt to create another block group with the same
         * item key (and failing with -EEXIST and a transaction abort).
         */
        ret = remove_block_group_free_space(trans, block_group);
        if (ret)
                goto out;

        ret = remove_block_group_item(trans, path, block_group);
        if (ret < 0)
                goto out;

        spin_lock(&block_group->lock);
        set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

        /*
         * At this point trimming or scrub can't start on this block group,
         * because we removed the block group from the rbtree
         * fs_info->block_group_cache_tree so no one can find it anymore and
         * even if someone already got this block group before we removed it
         * from the rbtree, they have already incremented block_group->frozen -
         * if they didn't, for the trimming case they won't find any free space
         * entries because we already removed them all when we called
         * btrfs_remove_free_space_cache().
         *
         * And we must not remove the extent map from the fs_info->mapping_tree
         * to prevent the same logical address range and physical device space
         * ranges from being reused for a new block group. This is needed to
         * avoid races with trimming and scrub.
         *
         * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
         * completely transactionless, so while it is trimming a range the
         * currently running transaction might finish and a new one start,
         * allowing for new block groups to be created that can reuse the same
         * physical device locations unless we take this special care.
         *
         * There may also be an implicit trim operation if the file system
         * is mounted with -odiscard. The same protections must remain
         * in place until the extents have been discarded completely when
         * the transaction commit has completed.
         */
        remove_em = (atomic_read(&block_group->frozen) == 0);
        spin_unlock(&block_group->lock);

        if (remove_em) {
                struct extent_map_tree *em_tree;

                em_tree = &fs_info->mapping_tree;
                write_lock(&em_tree->lock);
                remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
                /* once for the tree */
                free_extent_map(em);
        }

out:
        /* Once for the lookup reference */
        btrfs_put_block_group(block_group);
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
        btrfs_free_path(path);
        return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
                struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
        struct btrfs_root *root = btrfs_block_group_root(fs_info);
        struct extent_map_tree *em_tree = &fs_info->mapping_tree;
        struct extent_map *em;
        struct map_lookup *map;
        unsigned int num_items;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        read_unlock(&em_tree->lock);
        ASSERT(em && em->start == chunk_offset);

        /*
         * We need to reserve 3 + N units from the metadata space info in order
         * to remove a block group (done at btrfs_remove_chunk() and at
         * btrfs_remove_block_group()), which are used for:
         *
         * 1 unit for adding the free space inode's orphan (located in the tree
         * of tree roots).
         * 1 unit for deleting the block group item (located in the extent
         * tree).
         * 1 unit for deleting the free space item (located in tree of tree
         * roots).
         * N units for deleting N device extent items corresponding to each
         * stripe (located in the device tree).
         *
         * In order to remove a block group we also need to reserve units in the
         * system space info in order to update the chunk tree (update one or
         * more device items and remove one chunk item), but this is done at
         * btrfs_remove_chunk() through a call to check_system_chunk().
         */
        map = em->map_lookup;
        num_items = 3 + map->num_stripes;
        free_extent_map(em);

        return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}

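/*
 * Worked example (profile assumed for illustration): removing a RAID1 chunk
 * striped over two devices gives map->num_stripes == 2, so num_items is
 * 3 + 2 = 5 metadata units: the free space inode's orphan item, the block
 * group item and the free space item, plus one device extent item per stripe.
 */
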
26ce2095 JB |
1334 | /* |
1335 | * Mark block group @cache read-only, so later write won't happen to block | |
1336 | * group @cache. | |
1337 | * | |
1338 | * If @force is not set, this function will only mark the block group readonly | |
1339 | * if we have enough free space (1M) in other metadata/system block groups. | |
1340 | * If @force is not set, this function will mark the block group readonly | |
1341 | * without checking free space. | |
1342 | * | |
1343 | * NOTE: This function doesn't care if other block groups can contain all the | |
1344 | * data in this block group. That check should be done by relocation routine, | |
1345 | * not this function. | |
1346 | */ | |
32da5386 | 1347 | static int inc_block_group_ro(struct btrfs_block_group *cache, int force) |
26ce2095 JB |
1348 | { |
1349 | struct btrfs_space_info *sinfo = cache->space_info; | |
1350 | u64 num_bytes; | |
26ce2095 JB |
1351 | int ret = -ENOSPC; |
1352 | ||
26ce2095 JB |
1353 | spin_lock(&sinfo->lock); |
1354 | spin_lock(&cache->lock); | |
1355 | ||
195a49ea FM |
1356 | if (cache->swap_extents) { |
1357 | ret = -ETXTBSY; | |
1358 | goto out; | |
1359 | } | |
1360 | ||
26ce2095 JB |
1361 | if (cache->ro) { |
1362 | cache->ro++; | |
1363 | ret = 0; | |
1364 | goto out; | |
1365 | } | |
1366 | ||
b3470b5d | 1367 | num_bytes = cache->length - cache->reserved - cache->pinned - |
169e0da9 | 1368 | cache->bytes_super - cache->zone_unusable - cache->used; |
26ce2095 JB |
1369 | |
1370 | /* | |
a30a3d20 JB |
1371 | * Data never overcommits, even in mixed mode, so do just the straight |
1372 | * check of left over space in how much we have allocated. | |
26ce2095 | 1373 | */ |
a30a3d20 JB |
1374 | if (force) { |
1375 | ret = 0; | |
1376 | } else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) { | |
1377 | u64 sinfo_used = btrfs_space_info_used(sinfo, true); | |
1378 | ||
1379 | /* | |
1380 | * Here we make sure if we mark this bg RO, we still have enough | |
1381 | * free space as buffer. | |
1382 | */ | |
1383 | if (sinfo_used + num_bytes <= sinfo->total_bytes) | |
1384 | ret = 0; | |
1385 | } else { | |
1386 | /* | |
1387 | * We overcommit metadata, so we need to do the | |
1388 | * btrfs_can_overcommit check here, and we need to pass in | |
1389 | * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of | |
1390 | * leeway to allow us to mark this block group as read only. | |
1391 | */ | |
1392 | if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes, | |
1393 | BTRFS_RESERVE_NO_FLUSH)) | |
1394 | ret = 0; | |
1395 | } | |
1396 | ||
1397 | if (!ret) { | |
26ce2095 | 1398 | sinfo->bytes_readonly += num_bytes; |
169e0da9 NA |
1399 | if (btrfs_is_zoned(cache->fs_info)) { |
1400 | /* Migrate zone_unusable bytes to readonly */ | |
1401 | sinfo->bytes_readonly += cache->zone_unusable; | |
1402 | sinfo->bytes_zone_unusable -= cache->zone_unusable; | |
1403 | cache->zone_unusable = 0; | |
1404 | } | |
26ce2095 JB |
1405 | cache->ro++; |
1406 | list_add_tail(&cache->ro_list, &sinfo->ro_bgs); | |
26ce2095 JB |
1407 | } |
1408 | out: | |
1409 | spin_unlock(&cache->lock); | |
1410 | spin_unlock(&sinfo->lock); | |
1411 | if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) { | |
1412 | btrfs_info(cache->fs_info, | |
b3470b5d | 1413 | "unable to make block group %llu ro", cache->start); |
26ce2095 JB |
1414 | btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0); |
1415 | } | |
1416 | return ret; | |
1417 | } | |
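/*
 * An illustrative userspace sketch (not btrfs code) of the data space
 * admission check above: marking a data block group read-only only
 * succeeds if the bytes that would stop being allocatable still fit
 * under the space info's total. The byte values are made up.
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	static bool can_set_data_bg_ro(unsigned long long sinfo_used,
 *				       unsigned long long num_bytes,
 *				       unsigned long long total_bytes)
 *	{
 *		// Data never overcommits, so used space plus the unallocated
 *		// part of this block group must still fit in total_bytes.
 *		return sinfo_used + num_bytes <= total_bytes;
 *	}
 *
 *	int main(void)
 *	{
 *		// 9GiB used + 1GiB free in the group vs 10GiB total: OK (1).
 *		printf("%d\n", can_set_data_bg_ro(9ULL << 30, 1ULL << 30, 10ULL << 30));
 *		// 9.5GiB used + 1GiB free vs 10GiB total: refused (0).
 *		printf("%d\n", can_set_data_bg_ro(19ULL << 29, 1ULL << 30, 10ULL << 30));
 *		return 0;
 *	}
 */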
1418 | ||
fe119a6e NB |
1419 | static bool clean_pinned_extents(struct btrfs_trans_handle *trans, |
1420 | struct btrfs_block_group *bg) | |
45bb5d6a NB |
1421 | { |
1422 | struct btrfs_fs_info *fs_info = bg->fs_info; | |
fe119a6e | 1423 | struct btrfs_transaction *prev_trans = NULL; |
45bb5d6a NB |
1424 | const u64 start = bg->start; |
1425 | const u64 end = start + bg->length - 1; | |
1426 | int ret; | |
1427 | ||
fe119a6e NB |
1428 | spin_lock(&fs_info->trans_lock); |
1429 | if (trans->transaction->list.prev != &fs_info->trans_list) { | |
1430 | prev_trans = list_last_entry(&trans->transaction->list, | |
1431 | struct btrfs_transaction, list); | |
1432 | refcount_inc(&prev_trans->use_count); | |
1433 | } | |
1434 | spin_unlock(&fs_info->trans_lock); | |
1435 | ||
45bb5d6a NB |
1436 | /* |
1437 | * Hold the unused_bg_unpin_mutex lock to avoid racing with | |
1438 | * btrfs_finish_extent_commit(). If we are at transaction N, another | |
1439 | * task might be running finish_extent_commit() for the previous | |
1440 | * transaction N - 1, and have seen a range belonging to the block | |
fe119a6e NB |
1441 | * group in pinned_extents before we were able to clear the whole block |
1442 | * group range from pinned_extents. This means that task can look up |
1443 | * the block group after we unpinned it from pinned_extents and removed | |
1444 | * it, leading to a BUG_ON() at unpin_extent_range(). | |
45bb5d6a NB |
1445 | */ |
1446 | mutex_lock(&fs_info->unused_bg_unpin_mutex); | |
fe119a6e NB |
1447 | if (prev_trans) { |
1448 | ret = clear_extent_bits(&prev_trans->pinned_extents, start, end, | |
1449 | EXTENT_DIRTY); | |
1450 | if (ret) | |
534cf531 | 1451 | goto out; |
fe119a6e | 1452 | } |
45bb5d6a | 1453 | |
fe119a6e | 1454 | ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end, |
45bb5d6a | 1455 | EXTENT_DIRTY); |
534cf531 | 1456 | out: |
45bb5d6a | 1457 | mutex_unlock(&fs_info->unused_bg_unpin_mutex); |
5150bf19 FM |
1458 | if (prev_trans) |
1459 | btrfs_put_transaction(prev_trans); | |
45bb5d6a | 1460 | |
534cf531 | 1461 | return ret == 0; |
45bb5d6a NB |
1462 | } |
1463 | ||
e3e0520b JB |
1464 | /* |
1465 | * Process the unused_bgs list and remove any that don't have any allocated | |
1466 | * space inside of them. | |
1467 | */ | |
1468 | void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) | |
1469 | { | |
32da5386 | 1470 | struct btrfs_block_group *block_group; |
e3e0520b JB |
1471 | struct btrfs_space_info *space_info; |
1472 | struct btrfs_trans_handle *trans; | |
6e80d4f8 | 1473 | const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); |
e3e0520b JB |
1474 | int ret = 0; |
1475 | ||
1476 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) | |
1477 | return; | |
1478 | ||
2f12741f JB |
1479 | if (btrfs_fs_closing(fs_info)) |
1480 | return; | |
1481 | ||
ddfd08cb JB |
1482 | /* |
1483 | * Long running balances can keep us blocked here for eternity, so | |
1484 | * simply skip deletion if we're unable to get the mutex. | |
1485 | */ | |
f3372065 | 1486 | if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) |
ddfd08cb JB |
1487 | return; |
1488 | ||
e3e0520b JB |
1489 | spin_lock(&fs_info->unused_bgs_lock); |
1490 | while (!list_empty(&fs_info->unused_bgs)) { | |
e3e0520b JB |
1491 | int trimming; |
1492 | ||
1493 | block_group = list_first_entry(&fs_info->unused_bgs, | |
32da5386 | 1494 | struct btrfs_block_group, |
e3e0520b JB |
1495 | bg_list); |
1496 | list_del_init(&block_group->bg_list); | |
1497 | ||
1498 | space_info = block_group->space_info; | |
1499 | ||
1500 | if (ret || btrfs_mixed_space_info(space_info)) { | |
1501 | btrfs_put_block_group(block_group); | |
1502 | continue; | |
1503 | } | |
1504 | spin_unlock(&fs_info->unused_bgs_lock); | |
1505 | ||
b0643e59 DZ |
1506 | btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); |
1507 | ||
e3e0520b JB |
1508 | /* Don't want to race with allocators so take the groups_sem */ |
1509 | down_write(&space_info->groups_sem); | |
6e80d4f8 DZ |
1510 | |
1511 | /* | |
1512 | * Async discard moves the final block group discard to be prior | |
1513 | * to the unused_bgs code path. Therefore, if it's not fully | |
1514 | * trimmed, punt it back to the async discard lists. | |
1515 | */ | |
1516 | if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && | |
1517 | !btrfs_is_free_space_trimmed(block_group)) { | |
1518 | trace_btrfs_skip_unused_block_group(block_group); | |
1519 | up_write(&space_info->groups_sem); | |
1520 | /* Requeue if we failed because of async discard */ | |
1521 | btrfs_discard_queue_work(&fs_info->discard_ctl, | |
1522 | block_group); | |
1523 | goto next; | |
1524 | } | |
1525 | ||
e3e0520b JB |
1526 | spin_lock(&block_group->lock); |
1527 | if (block_group->reserved || block_group->pinned || | |
bf38be65 | 1528 | block_group->used || block_group->ro || |
e3e0520b JB |
1529 | list_is_singular(&block_group->list)) { |
1530 | /* | |
1531 | * We want to bail if we made new allocations or have | |
1532 | * outstanding allocations in this block group. We do | |
1533 | * the ro check in case balance is currently acting on | |
1534 | * this block group. | |
1535 | */ | |
1536 | trace_btrfs_skip_unused_block_group(block_group); | |
1537 | spin_unlock(&block_group->lock); | |
1538 | up_write(&space_info->groups_sem); | |
1539 | goto next; | |
1540 | } | |
1541 | spin_unlock(&block_group->lock); | |
1542 | ||
1543 | /* We don't want to force the issue, only flip if it's ok. */ | |
e11c0406 | 1544 | ret = inc_block_group_ro(block_group, 0); |
e3e0520b JB |
1545 | up_write(&space_info->groups_sem); |
1546 | if (ret < 0) { | |
1547 | ret = 0; | |
1548 | goto next; | |
1549 | } | |
1550 | ||
74e91b12 NA |
1551 | ret = btrfs_zone_finish(block_group); |
1552 | if (ret < 0) { | |
1553 | btrfs_dec_block_group_ro(block_group); | |
1554 | if (ret == -EAGAIN) | |
1555 | ret = 0; | |
1556 | goto next; | |
1557 | } | |
1558 | ||
e3e0520b JB |
1559 | /* |
1560 | * Want to do this before we do anything else so we can recover | |
1561 | * properly if we fail to join the transaction. | |
1562 | */ | |
1563 | trans = btrfs_start_trans_remove_block_group(fs_info, | |
b3470b5d | 1564 | block_group->start); |
e3e0520b JB |
1565 | if (IS_ERR(trans)) { |
1566 | btrfs_dec_block_group_ro(block_group); | |
1567 | ret = PTR_ERR(trans); | |
1568 | goto next; | |
1569 | } | |
1570 | ||
1571 | /* | |
1572 | * We could have pending pinned extents for this block group, | |
1573 | * just delete them, we don't care about them anymore. | |
1574 | */ | |
534cf531 FM |
1575 | if (!clean_pinned_extents(trans, block_group)) { |
1576 | btrfs_dec_block_group_ro(block_group); | |
e3e0520b | 1577 | goto end_trans; |
534cf531 | 1578 | } |
e3e0520b | 1579 | |
b0643e59 DZ |
1580 | /* |
1581 | * At this point, the block_group is read only and should fail | |
1582 | * new allocations. However, btrfs_finish_extent_commit() can | |
1583 | * cause this block_group to be placed back on the discard | |
1584 | * lists because now the block_group isn't fully discarded. | |
1585 | * Bail here and try again later after discarding everything. | |
1586 | */ | |
1587 | spin_lock(&fs_info->discard_ctl.lock); | |
1588 | if (!list_empty(&block_group->discard_list)) { | |
1589 | spin_unlock(&fs_info->discard_ctl.lock); | |
1590 | btrfs_dec_block_group_ro(block_group); | |
1591 | btrfs_discard_queue_work(&fs_info->discard_ctl, | |
1592 | block_group); | |
1593 | goto end_trans; | |
1594 | } | |
1595 | spin_unlock(&fs_info->discard_ctl.lock); | |
1596 | ||
e3e0520b JB |
1597 | /* Reset pinned so btrfs_put_block_group doesn't complain */ |
1598 | spin_lock(&space_info->lock); | |
1599 | spin_lock(&block_group->lock); | |
1600 | ||
1601 | btrfs_space_info_update_bytes_pinned(fs_info, space_info, | |
1602 | -block_group->pinned); | |
1603 | space_info->bytes_readonly += block_group->pinned; | |
e3e0520b JB |
1604 | block_group->pinned = 0; |
1605 | ||
1606 | spin_unlock(&block_group->lock); | |
1607 | spin_unlock(&space_info->lock); | |
1608 | ||
6e80d4f8 DZ |
1609 | /* |
1610 | * Normally an unused block group is passed in here, and trimming is |
1611 | * then handled in the transaction commit path. |
1612 | * Async discard interposes before this to do the trimming | |
1613 | * before coming down the unused block group path as trimming | |
1614 | * will no longer be done later in the transaction commit path. | |
1615 | */ | |
1616 | if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) | |
1617 | goto flip_async; | |
1618 | ||
dcba6e48 NA |
1619 | /* |
1620 | * DISCARD can flip during remount. On zoned filesystems, we | |
1621 | * need to reset sequential-required zones. | |
1622 | */ | |
1623 | trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) || | |
1624 | btrfs_is_zoned(fs_info); | |
e3e0520b JB |
1625 | |
1626 | /* Implicit trim during transaction commit. */ | |
1627 | if (trimming) | |
6b7304af | 1628 | btrfs_freeze_block_group(block_group); |
e3e0520b JB |
1629 | |
1630 | /* | |
1631 | * Btrfs_remove_chunk will abort the transaction if things go | |
1632 | * horribly wrong. | |
1633 | */ | |
b3470b5d | 1634 | ret = btrfs_remove_chunk(trans, block_group->start); |
e3e0520b JB |
1635 | |
1636 | if (ret) { | |
1637 | if (trimming) | |
6b7304af | 1638 | btrfs_unfreeze_block_group(block_group); |
e3e0520b JB |
1639 | goto end_trans; |
1640 | } | |
1641 | ||
1642 | /* | |
1643 | * If we're not mounted with -odiscard, we can just forget | |
1644 | * about this block group. Otherwise we'll need to wait | |
1645 | * until transaction commit to do the actual discard. | |
1646 | */ | |
1647 | if (trimming) { | |
1648 | spin_lock(&fs_info->unused_bgs_lock); | |
1649 | /* | |
1650 | * A concurrent scrub might have added us to the list | |
1651 | * fs_info->unused_bgs, so use a list_move operation | |
1652 | * to add the block group to the deleted_bgs list. | |
1653 | */ | |
1654 | list_move(&block_group->bg_list, | |
1655 | &trans->transaction->deleted_bgs); | |
1656 | spin_unlock(&fs_info->unused_bgs_lock); | |
1657 | btrfs_get_block_group(block_group); | |
1658 | } | |
1659 | end_trans: | |
1660 | btrfs_end_transaction(trans); | |
1661 | next: | |
e3e0520b JB |
1662 | btrfs_put_block_group(block_group); |
1663 | spin_lock(&fs_info->unused_bgs_lock); | |
1664 | } | |
1665 | spin_unlock(&fs_info->unused_bgs_lock); | |
f3372065 | 1666 | mutex_unlock(&fs_info->reclaim_bgs_lock); |
6e80d4f8 DZ |
1667 | return; |
1668 | ||
1669 | flip_async: | |
1670 | btrfs_end_transaction(trans); | |
f3372065 | 1671 | mutex_unlock(&fs_info->reclaim_bgs_lock); |
6e80d4f8 DZ |
1672 | btrfs_put_block_group(block_group); |
1673 | btrfs_discard_punt_unused_bgs_list(fs_info); | |
e3e0520b JB |
1674 | } |
1675 | ||
32da5386 | 1676 | void btrfs_mark_bg_unused(struct btrfs_block_group *bg) |
e3e0520b JB |
1677 | { |
1678 | struct btrfs_fs_info *fs_info = bg->fs_info; | |
1679 | ||
1680 | spin_lock(&fs_info->unused_bgs_lock); | |
1681 | if (list_empty(&bg->bg_list)) { | |
1682 | btrfs_get_block_group(bg); | |
0657b20c | 1683 | trace_btrfs_add_unused_block_group(bg); |
e3e0520b | 1684 | list_add_tail(&bg->bg_list, &fs_info->unused_bgs); |
0657b20c | 1685 | } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) { |
a9f18971 | 1686 | /* Pull out the block group from the reclaim_bgs list. */ |
0657b20c | 1687 | trace_btrfs_add_unused_block_group(bg); |
a9f18971 | 1688 | list_move_tail(&bg->bg_list, &fs_info->unused_bgs); |
e3e0520b JB |
1689 | } |
1690 | spin_unlock(&fs_info->unused_bgs_lock); | |
1691 | } | |
4358d963 | 1692 | |
2ca0ec77 JT |
1693 | /* |
1694 | * We want block groups with a low number of used bytes to be in the beginning | |
1695 | * of the list, so they will get reclaimed first. | |
1696 | */ | |
1697 | static int reclaim_bgs_cmp(void *unused, const struct list_head *a, | |
1698 | const struct list_head *b) | |
1699 | { | |
1700 | const struct btrfs_block_group *bg1, *bg2; | |
1701 | ||
1702 | bg1 = list_entry(a, struct btrfs_block_group, bg_list); | |
1703 | bg2 = list_entry(b, struct btrfs_block_group, bg_list); | |
1704 | ||
1705 | return bg1->used > bg2->used; | |
1706 | } | |
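/*
 * list_sort() interprets the comparator like qsort(): a positive return
 * puts @a after @b, so returning (bg1->used > bg2->used) yields a
 * stable ascending sort by used bytes. An illustrative userspace sketch
 * of the same ordering (not btrfs code), with made-up byte counts:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static int used_cmp(const void *a, const void *b)
 *	{
 *		unsigned long long ua = *(const unsigned long long *)a;
 *		unsigned long long ub = *(const unsigned long long *)b;
 *
 *		return (ua > ub) - (ua < ub); // ascending, as in reclaim_bgs_cmp
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long long used[] = { 300, 100, 200 };
 *
 *		qsort(used, 3, sizeof(used[0]), used_cmp);
 *		for (int i = 0; i < 3; i++)
 *			printf("%llu\n", used[i]); // prints 100 200 300
 *		return 0;
 *	}
 */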
1707 | ||
3687fcb0 JT |
1708 | static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info) |
1709 | { | |
1710 | if (btrfs_is_zoned(fs_info)) | |
1711 | return btrfs_zoned_should_reclaim(fs_info); | |
1712 | return true; | |
1713 | } | |
1714 | ||
81531225 BB |
1715 | static bool should_reclaim_block_group(struct btrfs_block_group *bg, u64 bytes_freed) |
1716 | { | |
1717 | const struct btrfs_space_info *space_info = bg->space_info; | |
1718 | const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold); | |
1719 | const u64 new_val = bg->used; | |
1720 | const u64 old_val = new_val + bytes_freed; | |
1721 | u64 thresh; | |
1722 | ||
1723 | if (reclaim_thresh == 0) | |
1724 | return false; | |
1725 | ||
428c8e03 | 1726 | thresh = mult_perc(bg->length, reclaim_thresh); |
81531225 BB |
1727 | |
1728 | /* | |
1729 | * If we were below the threshold before don't reclaim, we are likely a | |
1730 | * brand new block group and we don't want to relocate new block groups. | |
1731 | */ | |
1732 | if (old_val < thresh) | |
1733 | return false; | |
1734 | if (new_val >= thresh) | |
1735 | return false; | |
1736 | return true; | |
1737 | } | |
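/*
 * An illustrative userspace sketch (not btrfs code) of the reclaim
 * hysteresis above, using made-up numbers: a 1GiB block group and a 75%
 * threshold. Reclaim only triggers when a free crosses the threshold
 * from above, which filters out both fresh and still-busy groups.
 *
 *	#include <stdbool.h>
 *	#include <stdio.h>
 *
 *	static bool should_reclaim(unsigned long long length,
 *				   unsigned long long used,
 *				   unsigned long long freed,
 *				   unsigned int thresh_percent)
 *	{
 *		unsigned long long thresh = length * thresh_percent / 100;
 *		unsigned long long old_used = used + freed;
 *
 *		if (thresh_percent == 0)
 *			return false;
 *		if (old_used < thresh)	// was already below: likely a new group
 *			return false;
 *		if (used >= thresh)	// still above: not worth relocating yet
 *			return false;
 *		return true;
 *	}
 *
 *	int main(void)
 *	{
 *		// Threshold is 768MiB. 800MiB -> 700MiB crosses it: reclaim (1).
 *		printf("%d\n", should_reclaim(1ULL << 30, 700ULL << 20, 100ULL << 20, 75));
 *		// 600MiB -> 500MiB was already below it: leave alone (0).
 *		printf("%d\n", should_reclaim(1ULL << 30, 500ULL << 20, 100ULL << 20, 75));
 *		return 0;
 *	}
 */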
1738 | ||
18bb8bbf JT |
1739 | void btrfs_reclaim_bgs_work(struct work_struct *work) |
1740 | { | |
1741 | struct btrfs_fs_info *fs_info = | |
1742 | container_of(work, struct btrfs_fs_info, reclaim_bgs_work); | |
1743 | struct btrfs_block_group *bg; | |
1744 | struct btrfs_space_info *space_info; | |
18bb8bbf JT |
1745 | |
1746 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) | |
1747 | return; | |
1748 | ||
2f12741f JB |
1749 | if (btrfs_fs_closing(fs_info)) |
1750 | return; | |
1751 | ||
3687fcb0 JT |
1752 | if (!btrfs_should_reclaim(fs_info)) |
1753 | return; | |
1754 | ||
ca5e4ea0 NA |
1755 | sb_start_write(fs_info->sb); |
1756 | ||
1757 | if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { | |
1758 | sb_end_write(fs_info->sb); | |
18bb8bbf | 1759 | return; |
ca5e4ea0 | 1760 | } |
18bb8bbf | 1761 | |
9cc0b837 JT |
1762 | /* |
1763 | * Long running balances can keep us blocked here for eternity, so | |
1764 | * simply skip reclaim if we're unable to get the mutex. | |
1765 | */ | |
1766 | if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) { | |
1767 | btrfs_exclop_finish(fs_info); | |
ca5e4ea0 | 1768 | sb_end_write(fs_info->sb); |
9cc0b837 JT |
1769 | return; |
1770 | } | |
1771 | ||
18bb8bbf | 1772 | spin_lock(&fs_info->unused_bgs_lock); |
2ca0ec77 JT |
1773 | /* |
1774 | * Sort happens under lock because we can't simply splice it and sort. | |
1775 | * The block groups might still be in use and reachable via bg_list, | |
1776 | * and their presence in the reclaim_bgs list must be preserved. | |
1777 | */ | |
1778 | list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp); | |
18bb8bbf | 1779 | while (!list_empty(&fs_info->reclaim_bgs)) { |
5f93e776 | 1780 | u64 zone_unusable; |
1cea5cf0 FM |
1781 | int ret = 0; |
1782 | ||
18bb8bbf JT |
1783 | bg = list_first_entry(&fs_info->reclaim_bgs, |
1784 | struct btrfs_block_group, | |
1785 | bg_list); | |
1786 | list_del_init(&bg->bg_list); | |
1787 | ||
1788 | space_info = bg->space_info; | |
1789 | spin_unlock(&fs_info->unused_bgs_lock); | |
1790 | ||
1791 | /* Don't race with allocators so take the groups_sem */ | |
1792 | down_write(&space_info->groups_sem); | |
1793 | ||
1794 | spin_lock(&bg->lock); | |
1795 | if (bg->reserved || bg->pinned || bg->ro) { | |
1796 | /* | |
1797 | * We want to bail if we made new allocations or have | |
1798 | * outstanding allocations in this block group. We do | |
1799 | * the ro check in case balance is currently acting on | |
1800 | * this block group. | |
1801 | */ | |
1802 | spin_unlock(&bg->lock); | |
1803 | up_write(&space_info->groups_sem); | |
1804 | goto next; | |
1805 | } | |
cc4804bf BB |
1806 | if (bg->used == 0) { |
1807 | /* | |
1808 | * It is possible that we trigger relocation on a block | |
1809 | * group as its extents are deleted and it first goes | |
1810 | * below the threshold, then shortly after goes empty. | |
1811 | * | |
1812 | * In this case, relocating it does delete it, but has | |
1813 | * some overhead in relocation specific metadata, looking | |
1814 | * for the non-existent extents and running some extra | |
1815 | * transactions, which we can avoid by using one of the | |
1816 | * other mechanisms for dealing with empty block groups. | |
1817 | */ | |
1818 | if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) | |
1819 | btrfs_mark_bg_unused(bg); | |
1820 | spin_unlock(&bg->lock); | |
1821 | up_write(&space_info->groups_sem); | |
1822 | goto next; | |
81531225 BB |
1823 | |
1824 | } | |
1825 | /* | |
1826 | * The block group might no longer meet the reclaim condition by | |
1827 | * the time we get around to reclaiming it, so to avoid | |
1828 | * reclaiming overly full block_groups, skip reclaiming them. | |
1829 | * | |
1830 | * Since the decision making process also depends on the amount | |
1831 | * being freed, pass in a fake giant value to skip that extra | |
1832 | * check, which is more meaningful when adding to the list in | |
1833 | * the first place. | |
1834 | */ | |
1835 | if (!should_reclaim_block_group(bg, bg->length)) { | |
1836 | spin_unlock(&bg->lock); | |
1837 | up_write(&space_info->groups_sem); | |
1838 | goto next; | |
cc4804bf | 1839 | } |
18bb8bbf JT |
1840 | spin_unlock(&bg->lock); |
1841 | ||
93463ff7 NA |
1842 | /* |
1843 | * Get out fast, in case we're read-only or unmounting the | |
1844 | * filesystem. It is OK to drop block groups from the list even | |
1845 | * for the read-only case. As we did sb_start_write(), | |
1846 | * "mount -o remount,ro" won't happen and read-only filesystem | |
1847 | * means it is forced read-only due to a fatal error. So, it | |
1848 | * never gets back to read-write to let us reclaim again. | |
1849 | */ | |
1850 | if (btrfs_need_cleaner_sleep(fs_info)) { | |
18bb8bbf JT |
1851 | up_write(&space_info->groups_sem); |
1852 | goto next; | |
1853 | } | |
1854 | ||
5f93e776 JT |
1855 | /* |
1856 | * Cache the zone_unusable value before turning the block group |
1857 | * read-only. As soon as the block group is read-only, its |
1858 | * zone_unusable value gets moved to the block group's read-only | |
1859 | * bytes and isn't available for calculations anymore. | |
1860 | */ | |
1861 | zone_unusable = bg->zone_unusable; | |
18bb8bbf JT |
1862 | ret = inc_block_group_ro(bg, 0); |
1863 | up_write(&space_info->groups_sem); | |
1864 | if (ret < 0) | |
1865 | goto next; | |
1866 | ||
5f93e776 JT |
1867 | btrfs_info(fs_info, |
1868 | "reclaiming chunk %llu with %llu%% used %llu%% unusable", | |
95cd356c JT |
1869 | bg->start, |
1870 | div64_u64(bg->used * 100, bg->length), | |
5f93e776 | 1871 | div64_u64(zone_unusable * 100, bg->length)); |
18bb8bbf JT |
1872 | trace_btrfs_reclaim_block_group(bg); |
1873 | ret = btrfs_relocate_chunk(fs_info, bg->start); | |
74944c87 JB |
1874 | if (ret) { |
1875 | btrfs_dec_block_group_ro(bg); | |
18bb8bbf JT |
1876 | btrfs_err(fs_info, "error relocating chunk %llu", |
1877 | bg->start); | |
74944c87 | 1878 | } |
18bb8bbf JT |
1879 | |
1880 | next: | |
7e271809 NA |
1881 | if (ret) |
1882 | btrfs_mark_bg_to_reclaim(bg); | |
d96b3424 | 1883 | btrfs_put_block_group(bg); |
3ed01616 NA |
1884 | |
1885 | mutex_unlock(&fs_info->reclaim_bgs_lock); | |
1886 | /* | |
1887 | * Reclaiming all the block groups in the list can take really | |
1888 | * long. Prioritize cleaning up unused block groups. | |
1889 | */ | |
1890 | btrfs_delete_unused_bgs(fs_info); | |
1891 | /* | |
1892 | * If we are interrupted by a balance, we can just bail out. The | |
1893 | * cleaner thread will restart it again if necessary. |
1894 | */ | |
1895 | if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) | |
1896 | goto end; | |
18bb8bbf JT |
1897 | spin_lock(&fs_info->unused_bgs_lock); |
1898 | } | |
1899 | spin_unlock(&fs_info->unused_bgs_lock); | |
1900 | mutex_unlock(&fs_info->reclaim_bgs_lock); | |
3ed01616 | 1901 | end: |
18bb8bbf | 1902 | btrfs_exclop_finish(fs_info); |
ca5e4ea0 | 1903 | sb_end_write(fs_info->sb); |
18bb8bbf JT |
1904 | } |
1905 | ||
1906 | void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info) | |
1907 | { | |
1908 | spin_lock(&fs_info->unused_bgs_lock); | |
1909 | if (!list_empty(&fs_info->reclaim_bgs)) | |
1910 | queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work); | |
1911 | spin_unlock(&fs_info->unused_bgs_lock); | |
1912 | } | |
1913 | ||
1914 | void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg) | |
1915 | { | |
1916 | struct btrfs_fs_info *fs_info = bg->fs_info; | |
1917 | ||
1918 | spin_lock(&fs_info->unused_bgs_lock); | |
1919 | if (list_empty(&bg->bg_list)) { | |
1920 | btrfs_get_block_group(bg); | |
1921 | trace_btrfs_add_reclaim_block_group(bg); | |
1922 | list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs); | |
1923 | } | |
1924 | spin_unlock(&fs_info->unused_bgs_lock); | |
1925 | } | |
1926 | ||
e3ba67a1 JT |
1927 | static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key, |
1928 | struct btrfs_path *path) | |
1929 | { | |
1930 | struct extent_map_tree *em_tree; | |
1931 | struct extent_map *em; | |
1932 | struct btrfs_block_group_item bg; | |
1933 | struct extent_buffer *leaf; | |
1934 | int slot; | |
1935 | u64 flags; | |
1936 | int ret = 0; | |
1937 | ||
1938 | slot = path->slots[0]; | |
1939 | leaf = path->nodes[0]; | |
1940 | ||
1941 | em_tree = &fs_info->mapping_tree; | |
1942 | read_lock(&em_tree->lock); | |
1943 | em = lookup_extent_mapping(em_tree, key->objectid, key->offset); | |
1944 | read_unlock(&em_tree->lock); | |
1945 | if (!em) { | |
1946 | btrfs_err(fs_info, | |
1947 | "logical %llu len %llu found bg but no related chunk", | |
1948 | key->objectid, key->offset); | |
1949 | return -ENOENT; | |
1950 | } | |
1951 | ||
1952 | if (em->start != key->objectid || em->len != key->offset) { | |
1953 | btrfs_err(fs_info, | |
1954 | "block group %llu len %llu mismatch with chunk %llu len %llu", | |
1955 | key->objectid, key->offset, em->start, em->len); | |
1956 | ret = -EUCLEAN; | |
1957 | goto out_free_em; | |
1958 | } | |
1959 | ||
1960 | read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot), | |
1961 | sizeof(bg)); | |
1962 | flags = btrfs_stack_block_group_flags(&bg) & | |
1963 | BTRFS_BLOCK_GROUP_TYPE_MASK; | |
1964 | ||
1965 | if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { | |
1966 | btrfs_err(fs_info, | |
1967 | "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", | |
1968 | key->objectid, key->offset, flags, | |
1969 | (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type)); | |
1970 | ret = -EUCLEAN; | |
1971 | } | |
1972 | ||
1973 | out_free_em: | |
1974 | free_extent_map(em); | |
1975 | return ret; | |
1976 | } | |
1977 | ||
4358d963 JB |
1978 | static int find_first_block_group(struct btrfs_fs_info *fs_info, |
1979 | struct btrfs_path *path, | |
1980 | struct btrfs_key *key) | |
1981 | { | |
dfe8aec4 | 1982 | struct btrfs_root *root = btrfs_block_group_root(fs_info); |
e3ba67a1 | 1983 | int ret; |
4358d963 | 1984 | struct btrfs_key found_key; |
4358d963 | 1985 | |
36dfbbe2 | 1986 | btrfs_for_each_slot(root, key, &found_key, path, ret) { |
4358d963 JB |
1987 | if (found_key.objectid >= key->objectid && |
1988 | found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { | |
36dfbbe2 | 1989 | return read_bg_from_eb(fs_info, &found_key, path); |
4358d963 | 1990 | } |
4358d963 | 1991 | } |
4358d963 JB |
1992 | return ret; |
1993 | } | |
1994 | ||
1995 | static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) | |
1996 | { | |
1997 | u64 extra_flags = chunk_to_extended(flags) & | |
1998 | BTRFS_EXTENDED_PROFILE_MASK; | |
1999 | ||
2000 | write_seqlock(&fs_info->profiles_lock); | |
2001 | if (flags & BTRFS_BLOCK_GROUP_DATA) | |
2002 | fs_info->avail_data_alloc_bits |= extra_flags; | |
2003 | if (flags & BTRFS_BLOCK_GROUP_METADATA) | |
2004 | fs_info->avail_metadata_alloc_bits |= extra_flags; | |
2005 | if (flags & BTRFS_BLOCK_GROUP_SYSTEM) | |
2006 | fs_info->avail_system_alloc_bits |= extra_flags; | |
2007 | write_sequnlock(&fs_info->profiles_lock); | |
2008 | } | |
2009 | ||
43dd529a DS |
2010 | /* |
2011 | * Map a physical disk address to a list of logical addresses. | |
9ee9b979 NB |
2012 | * |
2013 | * @fs_info: the filesystem | |
96a14336 NB |
2014 | * @chunk_start: logical address of block group |
2015 | * @physical: physical address to map to logical addresses | |
2016 | * @logical: return array of logical addresses which map to @physical | |
2017 | * @naddrs: length of @logical | |
2018 | * @stripe_len: size of IO stripe for the given block group | |
2019 | * | |
2020 | * Maps a particular @physical disk address to a list of @logical addresses. | |
2021 | * Used primarily to exclude those portions of a block group that contain super | |
2022 | * block copies. | |
2023 | */ | |
96a14336 | 2024 | int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, |
1eb82ef8 | 2025 | u64 physical, u64 **logical, int *naddrs, int *stripe_len) |
96a14336 NB |
2026 | { |
2027 | struct extent_map *em; | |
2028 | struct map_lookup *map; | |
2029 | u64 *buf; | |
2030 | u64 bytenr; | |
1776ad17 NB |
2031 | u64 data_stripe_length; |
2032 | u64 io_stripe_size; | |
2033 | int i, nr = 0; | |
2034 | int ret = 0; | |
96a14336 NB |
2035 | |
2036 | em = btrfs_get_chunk_map(fs_info, chunk_start, 1); | |
2037 | if (IS_ERR(em)) | |
2038 | return -EIO; | |
2039 | ||
2040 | map = em->map_lookup; | |
9e22b925 | 2041 | data_stripe_length = em->orig_block_len; |
a97699d1 | 2042 | io_stripe_size = BTRFS_STRIPE_LEN; |
138082f3 | 2043 | chunk_start = em->start; |
96a14336 | 2044 | |
9e22b925 NB |
2045 | /* For RAID5/6 adjust to a full IO stripe length */ |
2046 | if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) | |
cb091225 | 2047 | io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map)); |
96a14336 NB |
2048 | |
2049 | buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); | |
1776ad17 NB |
2050 | if (!buf) { |
2051 | ret = -ENOMEM; | |
2052 | goto out; | |
2053 | } | |
96a14336 NB |
2054 | |
2055 | for (i = 0; i < map->num_stripes; i++) { | |
1776ad17 | 2056 | bool already_inserted = false; |
6ded22c1 QW |
2057 | u32 stripe_nr; |
2058 | u32 offset; | |
1776ad17 NB |
2059 | int j; |
2060 | ||
2061 | if (!in_range(physical, map->stripes[i].physical, | |
2062 | data_stripe_length)) | |
96a14336 NB |
2063 | continue; |
2064 | ||
a97699d1 QW |
2065 | stripe_nr = (physical - map->stripes[i].physical) >> |
2066 | BTRFS_STRIPE_LEN_SHIFT; | |
2067 | offset = (physical - map->stripes[i].physical) & | |
2068 | BTRFS_STRIPE_LEN_MASK; | |
96a14336 | 2069 | |
ac067734 | 2070 | if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | |
6ded22c1 QW |
2071 | BTRFS_BLOCK_GROUP_RAID10)) |
2072 | stripe_nr = div_u64(stripe_nr * map->num_stripes + i, | |
2073 | map->sub_stripes); | |
96a14336 NB |
2074 | /* |
2075 | * The remaining case is RAID56, where the multiplication by |
2076 | * nr_data_stripes() is already folded into io_stripe_size |
2077 | * above, so no extra stripe_nr adjustment is needed here. |
2078 | */ | |
138082f3 | 2079 | bytenr = chunk_start + stripe_nr * io_stripe_size + offset; |
1776ad17 NB |
2080 | |
2081 | /* Ensure we don't add duplicate addresses */ | |
96a14336 | 2082 | for (j = 0; j < nr; j++) { |
1776ad17 NB |
2083 | if (buf[j] == bytenr) { |
2084 | already_inserted = true; | |
96a14336 | 2085 | break; |
1776ad17 | 2086 | } |
96a14336 | 2087 | } |
1776ad17 NB |
2088 | |
2089 | if (!already_inserted) | |
96a14336 | 2090 | buf[nr++] = bytenr; |
96a14336 NB |
2091 | } |
2092 | ||
2093 | *logical = buf; | |
2094 | *naddrs = nr; | |
1776ad17 NB |
2095 | *stripe_len = io_stripe_size; |
2096 | out: | |
96a14336 | 2097 | free_extent_map(em); |
1776ad17 | 2098 | return ret; |
96a14336 NB |
2099 | } |
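/*
 * An illustrative userspace sketch (not btrfs code) of the
 * physical-to-logical math above for a plain RAID0 chunk, using the
 * 64KiB stripe length and made-up chunk/device offsets.
 *
 *	#include <stdio.h>
 *
 *	#define STRIPE_LEN 65536ULL	// mirrors BTRFS_STRIPE_LEN
 *
 *	int main(void)
 *	{
 *		unsigned long long chunk_start = 1ULL << 30;	 // logical start
 *		unsigned long long stripe_physical = 1ULL << 20; // dev extent start
 *		unsigned long long physical = stripe_physical + 3 * STRIPE_LEN + 4096;
 *		unsigned long long stripe_nr, offset;
 *		int num_stripes = 2, sub_stripes = 1, i = 1;	 // 2nd RAID0 stripe
 *
 *		stripe_nr = (physical - stripe_physical) / STRIPE_LEN;	// 3
 *		offset = (physical - stripe_physical) % STRIPE_LEN;	// 4096
 *
 *		// RAID0/RAID10 interleave device stripes round-robin.
 *		stripe_nr = (stripe_nr * num_stripes + i) / sub_stripes; // 7
 *
 *		// Prints chunk_start + 7 * 64KiB + 4096.
 *		printf("logical %llu\n", chunk_start + stripe_nr * STRIPE_LEN + offset);
 *		return 0;
 *	}
 */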
2100 | ||
32da5386 | 2101 | static int exclude_super_stripes(struct btrfs_block_group *cache) |
4358d963 JB |
2102 | { |
2103 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
12659251 | 2104 | const bool zoned = btrfs_is_zoned(fs_info); |
4358d963 JB |
2105 | u64 bytenr; |
2106 | u64 *logical; | |
2107 | int stripe_len; | |
2108 | int i, nr, ret; | |
2109 | ||
b3470b5d DS |
2110 | if (cache->start < BTRFS_SUPER_INFO_OFFSET) { |
2111 | stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; | |
4358d963 | 2112 | cache->bytes_super += stripe_len; |
b1c8f527 FM |
2113 | ret = set_extent_bit(&fs_info->excluded_extents, cache->start, |
2114 | cache->start + stripe_len - 1, | |
2115 | EXTENT_UPTODATE, NULL); | |
4358d963 JB |
2116 | if (ret) |
2117 | return ret; | |
2118 | } | |
2119 | ||
2120 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | |
2121 | bytenr = btrfs_sb_offset(i); | |
1eb82ef8 | 2122 | ret = btrfs_rmap_block(fs_info, cache->start, |
4358d963 JB |
2123 | bytenr, &logical, &nr, &stripe_len); |
2124 | if (ret) | |
2125 | return ret; | |
2126 | ||
12659251 NA |
2127 | /* Shouldn't have super stripes in sequential zones */ |
2128 | if (zoned && nr) { | |
f1a07c2b | 2129 | kfree(logical); |
12659251 NA |
2130 | btrfs_err(fs_info, |
2131 | "zoned: block group %llu must not contain super block", | |
2132 | cache->start); | |
2133 | return -EUCLEAN; | |
2134 | } | |
2135 | ||
4358d963 | 2136 | while (nr--) { |
96f9b0f2 NB |
2137 | u64 len = min_t(u64, stripe_len, |
2138 | cache->start + cache->length - logical[nr]); | |
4358d963 JB |
2139 | |
2140 | cache->bytes_super += len; | |
b1c8f527 FM |
2141 | ret = set_extent_bit(&fs_info->excluded_extents, logical[nr], |
2142 | logical[nr] + len - 1, | |
2143 | EXTENT_UPTODATE, NULL); | |
4358d963 JB |
2144 | if (ret) { |
2145 | kfree(logical); | |
2146 | return ret; | |
2147 | } | |
2148 | } | |
2149 | ||
2150 | kfree(logical); | |
2151 | } | |
2152 | return 0; | |
2153 | } | |
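/*
 * The fixed super block copy locations excluded above, per the btrfs
 * on-disk format: primary at 64KiB, mirrors at 64MiB and 256GiB. An
 * illustrative userspace sketch (not btrfs code):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long sb_offset[] = {
 *			64ULL << 10,	// primary super block, 64KiB
 *			64ULL << 20,	// first mirror, 64MiB
 *			256ULL << 30,	// second mirror, 256GiB
 *		};
 *
 *		for (int i = 0; i < 3; i++)
 *			printf("super mirror %d at byte %llu\n", i, sb_offset[i]);
 *		return 0;
 *	}
 */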
2154 | ||
32da5386 | 2155 | static struct btrfs_block_group *btrfs_create_block_group_cache( |
9afc6649 | 2156 | struct btrfs_fs_info *fs_info, u64 start) |
4358d963 | 2157 | { |
32da5386 | 2158 | struct btrfs_block_group *cache; |
4358d963 JB |
2159 | |
2160 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | |
2161 | if (!cache) | |
2162 | return NULL; | |
2163 | ||
2164 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | |
2165 | GFP_NOFS); | |
2166 | if (!cache->free_space_ctl) { | |
2167 | kfree(cache); | |
2168 | return NULL; | |
2169 | } | |
2170 | ||
b3470b5d | 2171 | cache->start = start; |
4358d963 JB |
2172 | |
2173 | cache->fs_info = fs_info; | |
2174 | cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); | |
4358d963 | 2175 | |
6e80d4f8 DZ |
2176 | cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; |
2177 | ||
48aaeebe | 2178 | refcount_set(&cache->refs, 1); |
4358d963 JB |
2179 | spin_lock_init(&cache->lock); |
2180 | init_rwsem(&cache->data_rwsem); | |
2181 | INIT_LIST_HEAD(&cache->list); | |
2182 | INIT_LIST_HEAD(&cache->cluster_list); | |
2183 | INIT_LIST_HEAD(&cache->bg_list); | |
2184 | INIT_LIST_HEAD(&cache->ro_list); | |
b0643e59 | 2185 | INIT_LIST_HEAD(&cache->discard_list); |
4358d963 JB |
2186 | INIT_LIST_HEAD(&cache->dirty_list); |
2187 | INIT_LIST_HEAD(&cache->io_list); | |
afba2bc0 | 2188 | INIT_LIST_HEAD(&cache->active_bg_list); |
cd79909b | 2189 | btrfs_init_free_space_ctl(cache, cache->free_space_ctl); |
6b7304af | 2190 | atomic_set(&cache->frozen, 0); |
4358d963 | 2191 | mutex_init(&cache->free_space_lock); |
4358d963 JB |
2192 | |
2193 | return cache; | |
2194 | } | |
2195 | ||
2196 | /* | |
2197 | * Iterate all chunks and verify that each of them has the corresponding block | |
2198 | * group | |
2199 | */ | |
2200 | static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) | |
2201 | { | |
2202 | struct extent_map_tree *map_tree = &fs_info->mapping_tree; | |
2203 | struct extent_map *em; | |
32da5386 | 2204 | struct btrfs_block_group *bg; |
4358d963 JB |
2205 | u64 start = 0; |
2206 | int ret = 0; | |
2207 | ||
2208 | while (1) { | |
2209 | read_lock(&map_tree->lock); | |
2210 | /* | |
2211 | * lookup_extent_mapping will return the first extent map | |
2212 | * intersecting the range, so setting @len to 1 is enough to | |
2213 | * get the first chunk. | |
2214 | */ | |
2215 | em = lookup_extent_mapping(map_tree, start, 1); | |
2216 | read_unlock(&map_tree->lock); | |
2217 | if (!em) | |
2218 | break; | |
2219 | ||
2220 | bg = btrfs_lookup_block_group(fs_info, em->start); | |
2221 | if (!bg) { | |
2222 | btrfs_err(fs_info, | |
2223 | "chunk start=%llu len=%llu doesn't have corresponding block group", | |
2224 | em->start, em->len); | |
2225 | ret = -EUCLEAN; | |
2226 | free_extent_map(em); | |
2227 | break; | |
2228 | } | |
b3470b5d | 2229 | if (bg->start != em->start || bg->length != em->len || |
4358d963 JB |
2230 | (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != |
2231 | (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { | |
2232 | btrfs_err(fs_info, | |
2233 | "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", | |
2234 | em->start, em->len, | |
2235 | em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, | |
b3470b5d | 2236 | bg->start, bg->length, |
4358d963 JB |
2237 | bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); |
2238 | ret = -EUCLEAN; | |
2239 | free_extent_map(em); | |
2240 | btrfs_put_block_group(bg); | |
2241 | break; | |
2242 | } | |
2243 | start = em->start + em->len; | |
2244 | free_extent_map(em); | |
2245 | btrfs_put_block_group(bg); | |
2246 | } | |
2247 | return ret; | |
2248 | } | |
2249 | ||
ffb9e0f0 | 2250 | static int read_one_block_group(struct btrfs_fs_info *info, |
4afd2fe8 | 2251 | struct btrfs_block_group_item *bgi, |
d49a2ddb | 2252 | const struct btrfs_key *key, |
ffb9e0f0 QW |
2253 | int need_clear) |
2254 | { | |
32da5386 | 2255 | struct btrfs_block_group *cache; |
ffb9e0f0 | 2256 | const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); |
ffb9e0f0 QW |
2257 | int ret; |
2258 | ||
d49a2ddb | 2259 | ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); |
ffb9e0f0 | 2260 | |
9afc6649 | 2261 | cache = btrfs_create_block_group_cache(info, key->objectid); |
ffb9e0f0 QW |
2262 | if (!cache) |
2263 | return -ENOMEM; | |
2264 | ||
4afd2fe8 JT |
2265 | cache->length = key->offset; |
2266 | cache->used = btrfs_stack_block_group_used(bgi); | |
7248e0ce | 2267 | cache->commit_used = cache->used; |
4afd2fe8 | 2268 | cache->flags = btrfs_stack_block_group_flags(bgi); |
f7238e50 | 2269 | cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi); |
9afc6649 | 2270 | |
e3e39c72 MPS |
2271 | set_free_space_tree_thresholds(cache); |
2272 | ||
ffb9e0f0 QW |
2273 | if (need_clear) { |
2274 | /* | |
2275 | * When we mount with old space cache, we need to | |
2276 | * set BTRFS_DC_CLEAR and set dirty flag. | |
2277 | * | |
2278 | * a) Setting 'BTRFS_DC_CLEAR' makes sure that we | |
2279 | * truncate the old free space cache inode and | |
2280 | * setup a new one. | |
2281 | * b) Setting 'dirty flag' makes sure that we flush | |
2282 | * the new space cache info onto disk. | |
2283 | */ | |
2284 | if (btrfs_test_opt(info, SPACE_CACHE)) | |
2285 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
2286 | } | |
ffb9e0f0 QW |
2287 | if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && |
2288 | (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { | |
2289 | btrfs_err(info, | |
2290 | "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", | |
2291 | cache->start); | |
2292 | ret = -EINVAL; | |
2293 | goto error; | |
2294 | } | |
2295 | ||
a94794d5 | 2296 | ret = btrfs_load_block_group_zone_info(cache, false); |
08e11a3d NA |
2297 | if (ret) { |
2298 | btrfs_err(info, "zoned: failed to load zone info of bg %llu", | |
2299 | cache->start); | |
2300 | goto error; | |
2301 | } | |
2302 | ||
ffb9e0f0 QW |
2303 | /* |
2304 | * We need to exclude the super stripes now so that the space info has | |
2305 | * super bytes accounted for, otherwise we'll think we have more space | |
2306 | * than we actually do. | |
2307 | */ | |
2308 | ret = exclude_super_stripes(cache); | |
2309 | if (ret) { | |
2310 | /* We may have excluded something, so call this just in case. */ | |
2311 | btrfs_free_excluded_extents(cache); | |
2312 | goto error; | |
2313 | } | |
2314 | ||
2315 | /* | |
169e0da9 NA |
2316 | * For zoned filesystem, space after the allocation offset is the only |
2317 | * free space for a block group. So, we don't need any caching work. | |
2318 | * btrfs_calc_zone_unusable() will set the amount of free space and | |
2319 | * zone_unusable space. | |
2320 | * | |
2321 | * For regular filesystem, check for two cases, either we are full, and | |
2322 | * therefore don't need to bother with the caching work since we won't | |
2323 | * find any space, or we are empty, and we can just add all the space | |
2324 | * in and be done with it. This saves us _a_lot_ of time, particularly | |
2325 | * in the full case. | |
ffb9e0f0 | 2326 | */ |
169e0da9 NA |
2327 | if (btrfs_is_zoned(info)) { |
2328 | btrfs_calc_zone_unusable(cache); | |
c46c4247 NA |
2329 | /* Should not have any excluded extents. Just in case, though. */ |
2330 | btrfs_free_excluded_extents(cache); | |
169e0da9 | 2331 | } else if (cache->length == cache->used) { |
ffb9e0f0 QW |
2332 | cache->cached = BTRFS_CACHE_FINISHED; |
2333 | btrfs_free_excluded_extents(cache); | |
2334 | } else if (cache->used == 0) { | |
ffb9e0f0 | 2335 | cache->cached = BTRFS_CACHE_FINISHED; |
3b9f0995 FM |
2336 | ret = btrfs_add_new_free_space(cache, cache->start, |
2337 | cache->start + cache->length, NULL); | |
ffb9e0f0 | 2338 | btrfs_free_excluded_extents(cache); |
d8ccbd21 FM |
2339 | if (ret) |
2340 | goto error; | |
ffb9e0f0 QW |
2341 | } |
2342 | ||
2343 | ret = btrfs_add_block_group_cache(info, cache); | |
2344 | if (ret) { | |
2345 | btrfs_remove_free_space_cache(cache); | |
2346 | goto error; | |
2347 | } | |
2348 | trace_btrfs_add_block_group(info, cache, 0); | |
723de71d | 2349 | btrfs_add_bg_to_space_info(info, cache); |
ffb9e0f0 QW |
2350 | |
2351 | set_avail_alloc_bits(info, cache->flags); | |
a09f23c3 AJ |
2352 | if (btrfs_chunk_writeable(info, cache->start)) { |
2353 | if (cache->used == 0) { | |
2354 | ASSERT(list_empty(&cache->bg_list)); | |
2355 | if (btrfs_test_opt(info, DISCARD_ASYNC)) | |
2356 | btrfs_discard_queue_work(&info->discard_ctl, cache); | |
2357 | else | |
2358 | btrfs_mark_bg_unused(cache); | |
2359 | } | |
2360 | } else { | |
ffb9e0f0 | 2361 | inc_block_group_ro(cache, 1); |
ffb9e0f0 | 2362 | } |
a09f23c3 | 2363 | |
ffb9e0f0 QW |
2364 | return 0; |
2365 | error: | |
2366 | btrfs_put_block_group(cache); | |
2367 | return ret; | |
2368 | } | |
2369 | ||
42437a63 JB |
2370 | static int fill_dummy_bgs(struct btrfs_fs_info *fs_info) |
2371 | { | |
2372 | struct extent_map_tree *em_tree = &fs_info->mapping_tree; | |
42437a63 JB |
2373 | struct rb_node *node; |
2374 | int ret = 0; | |
2375 | ||
2376 | for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { | |
2377 | struct extent_map *em; | |
2378 | struct map_lookup *map; | |
2379 | struct btrfs_block_group *bg; | |
2380 | ||
2381 | em = rb_entry(node, struct extent_map, rb_node); | |
2382 | map = em->map_lookup; | |
2383 | bg = btrfs_create_block_group_cache(fs_info, em->start); | |
2384 | if (!bg) { | |
2385 | ret = -ENOMEM; | |
2386 | break; | |
2387 | } | |
2388 | ||
2389 | /* Fill dummy cache as FULL */ | |
2390 | bg->length = em->len; | |
2391 | bg->flags = map->type; | |
42437a63 JB |
2392 | bg->cached = BTRFS_CACHE_FINISHED; |
2393 | bg->used = em->len; | |
2395 | ret = btrfs_add_block_group_cache(fs_info, bg); | |
2b29726c QW |
2396 | /* |
2397 | * We may have some valid block group cache added already, in | |
2398 | * that case we skip to the next one. | |
2399 | */ | |
2400 | if (ret == -EEXIST) { | |
2401 | ret = 0; | |
2402 | btrfs_put_block_group(bg); | |
2403 | continue; | |
2404 | } | |
2405 | ||
42437a63 JB |
2406 | if (ret) { |
2407 | btrfs_remove_free_space_cache(bg); | |
2408 | btrfs_put_block_group(bg); | |
2409 | break; | |
2410 | } | |
2b29726c | 2411 | |
723de71d | 2412 | btrfs_add_bg_to_space_info(fs_info, bg); |
42437a63 JB |
2413 | |
2414 | set_avail_alloc_bits(fs_info, bg->flags); | |
2415 | } | |
2416 | if (!ret) | |
2417 | btrfs_init_global_block_rsv(fs_info); | |
2418 | return ret; | |
2419 | } | |
2420 | ||
4358d963 JB |
2421 | int btrfs_read_block_groups(struct btrfs_fs_info *info) |
2422 | { | |
dfe8aec4 | 2423 | struct btrfs_root *root = btrfs_block_group_root(info); |
4358d963 JB |
2424 | struct btrfs_path *path; |
2425 | int ret; | |
32da5386 | 2426 | struct btrfs_block_group *cache; |
4358d963 JB |
2427 | struct btrfs_space_info *space_info; |
2428 | struct btrfs_key key; | |
4358d963 JB |
2429 | int need_clear = 0; |
2430 | u64 cache_gen; | |
4358d963 | 2431 | |
81d5d614 QW |
2432 | /* |
2433 | * Either no extent root (with ibadroots rescue option) or we have | |
2434 | * unsupported RO options. The fs can never be mounted read-write, so no | |
2435 | * need to waste time searching block group items. | |
2436 | * | |
2437 | * This also allows new extent tree related changes to be RO compat, | |
2438 | * no need for a full incompat flag. | |
2439 | */ | |
2440 | if (!root || (btrfs_super_compat_ro_flags(info->super_copy) & | |
2441 | ~BTRFS_FEATURE_COMPAT_RO_SUPP)) | |
42437a63 JB |
2442 | return fill_dummy_bgs(info); |
2443 | ||
4358d963 JB |
2444 | key.objectid = 0; |
2445 | key.offset = 0; | |
2446 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
2447 | path = btrfs_alloc_path(); | |
2448 | if (!path) | |
2449 | return -ENOMEM; | |
4358d963 JB |
2450 | |
2451 | cache_gen = btrfs_super_cache_generation(info->super_copy); | |
2452 | if (btrfs_test_opt(info, SPACE_CACHE) && | |
2453 | btrfs_super_generation(info->super_copy) != cache_gen) | |
2454 | need_clear = 1; | |
2455 | if (btrfs_test_opt(info, CLEAR_CACHE)) | |
2456 | need_clear = 1; | |
2457 | ||
2458 | while (1) { | |
4afd2fe8 JT |
2459 | struct btrfs_block_group_item bgi; |
2460 | struct extent_buffer *leaf; | |
2461 | int slot; | |
2462 | ||
4358d963 JB |
2463 | ret = find_first_block_group(info, path, &key); |
2464 | if (ret > 0) | |
2465 | break; | |
2466 | if (ret != 0) | |
2467 | goto error; | |
2468 | ||
4afd2fe8 JT |
2469 | leaf = path->nodes[0]; |
2470 | slot = path->slots[0]; | |
2471 | ||
2472 | read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), | |
2473 | sizeof(bgi)); | |
2474 | ||
2475 | btrfs_item_key_to_cpu(leaf, &key, slot); | |
2476 | btrfs_release_path(path); | |
2477 | ret = read_one_block_group(info, &bgi, &key, need_clear); | |
ffb9e0f0 | 2478 | if (ret < 0) |
4358d963 | 2479 | goto error; |
ffb9e0f0 QW |
2480 | key.objectid += key.offset; |
2481 | key.offset = 0; | |
4358d963 | 2482 | } |
7837fa88 | 2483 | btrfs_release_path(path); |
4358d963 | 2484 | |
72804905 | 2485 | list_for_each_entry(space_info, &info->space_info, list) { |
49ea112d JB |
2486 | int i; |
2487 | ||
2488 | for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { | |
2489 | if (list_empty(&space_info->block_groups[i])) | |
2490 | continue; | |
2491 | cache = list_first_entry(&space_info->block_groups[i], | |
2492 | struct btrfs_block_group, | |
2493 | list); | |
2494 | btrfs_sysfs_add_block_group_type(cache); | |
2495 | } | |
2496 | ||
4358d963 JB |
2497 | if (!(btrfs_get_alloc_profile(info, space_info->flags) & |
2498 | (BTRFS_BLOCK_GROUP_RAID10 | | |
2499 | BTRFS_BLOCK_GROUP_RAID1_MASK | | |
2500 | BTRFS_BLOCK_GROUP_RAID56_MASK | | |
2501 | BTRFS_BLOCK_GROUP_DUP))) | |
2502 | continue; | |
2503 | /* | |
2504 | * Avoid allocating from un-mirrored block group if there are | |
2505 | * mirrored block groups. | |
2506 | */ | |
2507 | list_for_each_entry(cache, | |
2508 | &space_info->block_groups[BTRFS_RAID_RAID0], | |
2509 | list) | |
e11c0406 | 2510 | inc_block_group_ro(cache, 1); |
4358d963 JB |
2511 | list_for_each_entry(cache, |
2512 | &space_info->block_groups[BTRFS_RAID_SINGLE], | |
2513 | list) | |
e11c0406 | 2514 | inc_block_group_ro(cache, 1); |
4358d963 JB |
2515 | } |
2516 | ||
2517 | btrfs_init_global_block_rsv(info); | |
2518 | ret = check_chunk_block_group_mappings(info); | |
2519 | error: | |
2520 | btrfs_free_path(path); | |
2b29726c QW |
2521 | /* |
2522 | * We've hit some error while reading the extent tree, and have | |
2523 | * rescue=ibadroots mount option. | |
2524 | * Try to fill the tree using dummy block groups so that the user can | |
2525 | * continue to mount and grab their data. | |
2526 | */ | |
2527 | if (ret && btrfs_test_opt(info, IGNOREBADROOTS)) | |
2528 | ret = fill_dummy_bgs(info); | |
4358d963 JB |
2529 | return ret; |
2530 | } | |
2531 | ||
79bd3712 FM |
2532 | /* |
2533 | * This function, insert_block_group_item(), belongs to the phase 2 of chunk | |
2534 | * allocation. | |
2535 | * | |
2536 | * See the comment at btrfs_chunk_alloc() for details about the chunk allocation | |
2537 | * phases. | |
2538 | */ | |
97f4728a QW |
2539 | static int insert_block_group_item(struct btrfs_trans_handle *trans, |
2540 | struct btrfs_block_group *block_group) | |
2541 | { | |
2542 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2543 | struct btrfs_block_group_item bgi; | |
dfe8aec4 | 2544 | struct btrfs_root *root = btrfs_block_group_root(fs_info); |
97f4728a | 2545 | struct btrfs_key key; |
675dfe12 FM |
2546 | u64 old_commit_used; |
2547 | int ret; | |
97f4728a QW |
2548 | |
2549 | spin_lock(&block_group->lock); | |
2550 | btrfs_set_stack_block_group_used(&bgi, block_group->used); | |
2551 | btrfs_set_stack_block_group_chunk_objectid(&bgi, | |
f7238e50 | 2552 | block_group->global_root_id); |
97f4728a | 2553 | btrfs_set_stack_block_group_flags(&bgi, block_group->flags); |
675dfe12 FM |
2554 | old_commit_used = block_group->commit_used; |
2555 | block_group->commit_used = block_group->used; | |
97f4728a QW |
2556 | key.objectid = block_group->start; |
2557 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
2558 | key.offset = block_group->length; | |
2559 | spin_unlock(&block_group->lock); | |
2560 | ||
675dfe12 FM |
2561 | ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi)); |
2562 | if (ret < 0) { | |
2563 | spin_lock(&block_group->lock); | |
2564 | block_group->commit_used = old_commit_used; | |
2565 | spin_unlock(&block_group->lock); | |
2566 | } | |
2567 | ||
2568 | return ret; | |
97f4728a QW |
2569 | } |
2570 | ||
2eadb9e7 NB |
2571 | static int insert_dev_extent(struct btrfs_trans_handle *trans, |
2572 | struct btrfs_device *device, u64 chunk_offset, | |
2573 | u64 start, u64 num_bytes) | |
2574 | { | |
2575 | struct btrfs_fs_info *fs_info = device->fs_info; | |
2576 | struct btrfs_root *root = fs_info->dev_root; | |
2577 | struct btrfs_path *path; | |
2578 | struct btrfs_dev_extent *extent; | |
2579 | struct extent_buffer *leaf; | |
2580 | struct btrfs_key key; | |
2581 | int ret; | |
2582 | ||
2583 | WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)); | |
2584 | WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)); | |
2585 | path = btrfs_alloc_path(); | |
2586 | if (!path) | |
2587 | return -ENOMEM; | |
2588 | ||
2589 | key.objectid = device->devid; | |
2590 | key.type = BTRFS_DEV_EXTENT_KEY; | |
2591 | key.offset = start; | |
2592 | ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent)); | |
2593 | if (ret) | |
2594 | goto out; | |
2595 | ||
2596 | leaf = path->nodes[0]; | |
2597 | extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); | |
2598 | btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID); | |
2599 | btrfs_set_dev_extent_chunk_objectid(leaf, extent, | |
2600 | BTRFS_FIRST_CHUNK_TREE_OBJECTID); | |
2601 | btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset); | |
2602 | ||
2603 | btrfs_set_dev_extent_length(leaf, extent, num_bytes); | |
2604 | btrfs_mark_buffer_dirty(leaf); | |
2605 | out: | |
2606 | btrfs_free_path(path); | |
2607 | return ret; | |
2608 | } | |
2609 | ||
2610 | /* | |
2611 | * This function belongs to phase 2. | |
2612 | * | |
2613 | * See the comment at btrfs_chunk_alloc() for details about the chunk allocation | |
2614 | * phases. | |
2615 | */ | |
2616 | static int insert_dev_extents(struct btrfs_trans_handle *trans, | |
2617 | u64 chunk_offset, u64 chunk_size) | |
2618 | { | |
2619 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2620 | struct btrfs_device *device; | |
2621 | struct extent_map *em; | |
2622 | struct map_lookup *map; | |
2623 | u64 dev_offset; | |
2624 | u64 stripe_size; | |
2625 | int i; | |
2626 | int ret = 0; | |
2627 | ||
2628 | em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size); | |
2629 | if (IS_ERR(em)) | |
2630 | return PTR_ERR(em); | |
2631 | ||
2632 | map = em->map_lookup; | |
2633 | stripe_size = em->orig_block_len; | |
2634 | ||
2635 | /* | |
2636 | * Take the device list mutex to prevent races with the final phase of | |
2637 | * a device replace operation that replaces the device object associated | |
2638 | * with the map's stripes, because the device object's id can change | |
2639 | * at any time during that final phase of the device replace operation | |
2640 | * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the | |
2641 | * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID, | |
2642 | * resulting in persisting a device extent item with such ID. | |
2643 | */ | |
2644 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | |
2645 | for (i = 0; i < map->num_stripes; i++) { | |
2646 | device = map->stripes[i].dev; | |
2647 | dev_offset = map->stripes[i].physical; | |
2648 | ||
2649 | ret = insert_dev_extent(trans, device, chunk_offset, dev_offset, | |
2650 | stripe_size); | |
2651 | if (ret) | |
2652 | break; | |
2653 | } | |
2654 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | |
2655 | ||
2656 | free_extent_map(em); | |
2657 | return ret; | |
2658 | } | |
2659 | ||
79bd3712 FM |
2660 | /* |
2661 | * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of | |
2662 | * chunk allocation. | |
2663 | * | |
2664 | * See the comment at btrfs_chunk_alloc() for details about the chunk allocation | |
2665 | * phases. | |
2666 | */ | |
4358d963 JB |
2667 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) |
2668 | { | |
2669 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2670 | struct btrfs_block_group *block_group; |
4358d963 JB |
2671 | int ret = 0; |
2672 | ||
4358d963 | 2673 | while (!list_empty(&trans->new_bgs)) { |
49ea112d JB |
2674 | int index; |
2675 | ||
4358d963 | 2676 | block_group = list_first_entry(&trans->new_bgs, |
32da5386 | 2677 | struct btrfs_block_group, |
4358d963 JB |
2678 | bg_list); |
2679 | if (ret) | |
2680 | goto next; | |
2681 | ||
49ea112d JB |
2682 | index = btrfs_bg_flags_to_raid_index(block_group->flags); |
2683 | ||
97f4728a | 2684 | ret = insert_block_group_item(trans, block_group); |
4358d963 JB |
2685 | if (ret) |
2686 | btrfs_abort_transaction(trans, ret); | |
3349b57f JB |
2687 | if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED, |
2688 | &block_group->runtime_flags)) { | |
79bd3712 FM |
2689 | mutex_lock(&fs_info->chunk_mutex); |
2690 | ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group); | |
2691 | mutex_unlock(&fs_info->chunk_mutex); | |
2692 | if (ret) | |
2693 | btrfs_abort_transaction(trans, ret); | |
2694 | } | |
2eadb9e7 NB |
2695 | ret = insert_dev_extents(trans, block_group->start, |
2696 | block_group->length); | |
4358d963 JB |
2697 | if (ret) |
2698 | btrfs_abort_transaction(trans, ret); | |
2699 | add_block_group_free_space(trans, block_group); | |
49ea112d JB |
2700 | |
2701 | /* | |
2702 | * If we restriped during balance, we may have added a new raid | |
2703 | * type, so now add the sysfs entries when it is safe to do so. | |
2704 | * We don't have to worry about locking here as it's handled in | |
2705 | * btrfs_sysfs_add_block_group_type. | |
2706 | */ | |
2707 | if (block_group->space_info->block_group_kobjs[index] == NULL) | |
2708 | btrfs_sysfs_add_block_group_type(block_group); | |
2709 | ||
4358d963 JB |
2710 | /* Already aborted the transaction if it failed. */ |
2711 | next: | |
2712 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
2713 | list_del_init(&block_group->bg_list); | |
0657b20c | 2714 | clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags); |
4358d963 JB |
2715 | } |
2716 | btrfs_trans_release_chunk_metadata(trans); | |
2717 | } | |
2718 | ||
f7238e50 JB |
2719 | /* |
2720 | * For extent tree v2 we use the block_group_item->chunk_offset to point at our | |
2721 | * global root id. For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID. | |
2722 | */ | |
2723 | static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset) | |
2724 | { | |
2725 | u64 div = SZ_1G; | |
2726 | u64 index; | |
2727 | ||
2728 | if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) | |
2729 | return BTRFS_FIRST_CHUNK_TREE_OBJECTID; | |
2730 | ||
2731 | /* If we have a smaller filesystem, index based on 128MiB instead. */ |
2732 | if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL)) | |
2733 | div = SZ_128M; | |
2734 | ||
2735 | offset = div64_u64(offset, div); | |
2736 | div64_u64_rem(offset, fs_info->nr_global_roots, &index); | |
2737 | return index; | |
2738 | } | |
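/*
 * An illustrative userspace sketch (not btrfs code) of the global root
 * id selection above for an extent-tree-v2 filesystem, with a made-up
 * number of global roots.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long div = 1ULL << 30;	// fs > 10GiB: 1GiB buckets
 *		unsigned long long nr_global_roots = 4;	// illustrative
 *		unsigned long long offset = 5ULL << 30;	// chunk at 5GiB
 *
 *		// Bucket the chunk offset, then spread buckets across roots.
 *		printf("global root %llu\n", (offset / div) % nr_global_roots); // 1
 *		return 0;
 *	}
 */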
2739 | ||
79bd3712 | 2740 | struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans, |
5758d1bd | 2741 | u64 type, |
79bd3712 | 2742 | u64 chunk_offset, u64 size) |
4358d963 JB |
2743 | { |
2744 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2745 | struct btrfs_block_group *cache; |
4358d963 JB |
2746 | int ret; |
2747 | ||
2748 | btrfs_set_log_full_commit(trans); | |
2749 | ||
9afc6649 | 2750 | cache = btrfs_create_block_group_cache(fs_info, chunk_offset); |
4358d963 | 2751 | if (!cache) |
79bd3712 | 2752 | return ERR_PTR(-ENOMEM); |
4358d963 | 2753 | |
0657b20c FM |
2754 | /* |
2755 | * Mark it as new before adding it to the rbtree of block groups or any | |
2756 | * list, so that no other task finds it and calls btrfs_mark_bg_unused() | |
2757 | * before the new flag is set. | |
2758 | */ | |
2759 | set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags); | |
2760 | ||
9afc6649 | 2761 | cache->length = size; |
e3e39c72 | 2762 | set_free_space_tree_thresholds(cache); |
4358d963 | 2763 | cache->flags = type; |
4358d963 | 2764 | cache->cached = BTRFS_CACHE_FINISHED; |
f7238e50 JB |
2765 | cache->global_root_id = calculate_global_root_id(fs_info, cache->start); |
2766 | ||
997e3e2e | 2767 | if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) |
0d7764ff | 2768 | set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags); |
08e11a3d | 2769 | |
a94794d5 | 2770 | ret = btrfs_load_block_group_zone_info(cache, true); |
08e11a3d NA |
2771 | if (ret) { |
2772 | btrfs_put_block_group(cache); | |
79bd3712 | 2773 | return ERR_PTR(ret); |
08e11a3d NA |
2774 | } |
2775 | ||
4358d963 JB |
2776 | ret = exclude_super_stripes(cache); |
2777 | if (ret) { | |
2778 | /* We may have excluded something, so call this just in case */ | |
2779 | btrfs_free_excluded_extents(cache); | |
2780 | btrfs_put_block_group(cache); | |
79bd3712 | 2781 | return ERR_PTR(ret); |
4358d963 JB |
2782 | } |
2783 | ||
3b9f0995 | 2784 | ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL); |
4358d963 | 2785 | btrfs_free_excluded_extents(cache); |
d8ccbd21 FM |
2786 | if (ret) { |
2787 | btrfs_put_block_group(cache); | |
2788 | return ERR_PTR(ret); | |
2789 | } | |
4358d963 | 2790 | |
4358d963 JB |
2791 | /* |
2792 | * Ensure the corresponding space_info object is created and | |
2793 | * assigned to our block group. We want our bg to be added to the rbtree | |
2794 | * with its ->space_info set. | |
2795 | */ | |
2796 | cache->space_info = btrfs_find_space_info(fs_info, cache->flags); | |
2797 | ASSERT(cache->space_info); | |
2798 | ||
2799 | ret = btrfs_add_block_group_cache(fs_info, cache); | |
2800 | if (ret) { | |
2801 | btrfs_remove_free_space_cache(cache); | |
2802 | btrfs_put_block_group(cache); | |
79bd3712 | 2803 | return ERR_PTR(ret); |
4358d963 JB |
2804 | } |
2805 | ||
2806 | /* | |
2807 | * Now that our block group has its ->space_info set and is inserted in | |
2808 | * the rbtree, update the space info's counters. | |
2809 | */ | |
2810 | trace_btrfs_add_block_group(fs_info, cache, 1); | |
723de71d | 2811 | btrfs_add_bg_to_space_info(fs_info, cache); |
4358d963 JB |
2812 | btrfs_update_global_block_rsv(fs_info); |
2813 | ||
9d4b0a12 JB |
2814 | #ifdef CONFIG_BTRFS_DEBUG |
2815 | if (btrfs_should_fragment_free_space(cache)) { | |
5758d1bd | 2816 | cache->space_info->bytes_used += size >> 1; |
9d4b0a12 JB |
2817 | fragment_free_space(cache); |
2818 | } | |
2819 | #endif | |
4358d963 JB |
2820 | |
2821 | list_add_tail(&cache->bg_list, &trans->new_bgs); | |
2822 | trans->delayed_ref_updates++; | |
2823 | btrfs_update_delayed_refs_rsv(trans); | |
2824 | ||
2825 | set_avail_alloc_bits(fs_info, type); | |
79bd3712 | 2826 | return cache; |
4358d963 | 2827 | } |
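/*
 * Sketch of where this sits in the allocation path (simplified): chunk
 * allocation in volumes.c picks the device extents and then calls
 * btrfs_make_block_group(); the block group item itself is only inserted
 * later, when btrfs_create_pending_block_groups() drains trans->new_bgs.
 */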
26ce2095 | 2828 | |
b12de528 QW |
2829 | /* |
2830 | * Mark one block group RO, can be called several times for the same block | |
2831 | * group. | |
2832 | * | |
2833 | * @cache: the destination block group | |
2834 | * @do_chunk_alloc: whether need to do chunk pre-allocation, this is to | |
2835 | * ensure we still have some free space after marking this | |
2836 | * block group RO. | |
2837 | */ | |
2838 | int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, | |
2839 | bool do_chunk_alloc) | |
26ce2095 JB |
2840 | { |
2841 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
2842 | struct btrfs_trans_handle *trans; | |
dfe8aec4 | 2843 | struct btrfs_root *root = btrfs_block_group_root(fs_info); |
26ce2095 JB |
2844 | u64 alloc_flags; |
2845 | int ret; | |
b6e9f16c | 2846 | bool dirty_bg_running; |
26ce2095 | 2847 | |
2d192fc4 QW |
2848 | /* |
2849 | * This can only happen when we are doing read-only scrub on read-only | |
2850 | * mount. | |
2851 | * In that case we should not start a new transaction on read-only fs. | |
2852 | * Thus here we skip all chunk allocations. | |
2853 | */ | |
2854 | if (sb_rdonly(fs_info->sb)) { | |
2855 | mutex_lock(&fs_info->ro_block_group_mutex); | |
2856 | ret = inc_block_group_ro(cache, 0); | |
2857 | mutex_unlock(&fs_info->ro_block_group_mutex); | |
2858 | return ret; | |
2859 | } | |
2860 | ||
b6e9f16c | 2861 | do { |
dfe8aec4 | 2862 | trans = btrfs_join_transaction(root); |
b6e9f16c NB |
2863 | if (IS_ERR(trans)) |
2864 | return PTR_ERR(trans); | |
26ce2095 | 2865 | |
b6e9f16c | 2866 | dirty_bg_running = false; |
26ce2095 | 2867 | |
b6e9f16c NB |
2868 | /* |
2869 | * We're not allowed to set block groups readonly after the dirty | |
2870 | * block group cache has started writing. If it already started, | |
2871 | * back off and let this transaction commit. | |
2872 | */ | |
2873 | mutex_lock(&fs_info->ro_block_group_mutex); | |
2874 | if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { | |
2875 | u64 transid = trans->transid; | |
26ce2095 | 2876 | |
b6e9f16c NB |
2877 | mutex_unlock(&fs_info->ro_block_group_mutex); |
2878 | btrfs_end_transaction(trans); | |
2879 | ||
2880 | ret = btrfs_wait_for_commit(fs_info, transid); | |
2881 | if (ret) | |
2882 | return ret; | |
2883 | dirty_bg_running = true; | |
2884 | } | |
2885 | } while (dirty_bg_running); | |
26ce2095 | 2886 | |
b12de528 | 2887 | if (do_chunk_alloc) { |
26ce2095 | 2888 | /* |
b12de528 QW |
2889 | * If we are changing raid levels, try to allocate a |
2890 | * corresponding block group with the new raid level. | |
26ce2095 | 2891 | */ |
349e120e | 2892 | alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); |
b12de528 QW |
2893 | if (alloc_flags != cache->flags) { |
2894 | ret = btrfs_chunk_alloc(trans, alloc_flags, | |
2895 | CHUNK_ALLOC_FORCE); | |
2896 | /* | |
2897 | * ENOSPC is allowed here, we may have enough space | |
2898 | * already allocated at the new raid level to carry on | |
2899 | */ | |
2900 | if (ret == -ENOSPC) | |
2901 | ret = 0; | |
2902 | if (ret < 0) | |
2903 | goto out; | |
2904 | } | |
26ce2095 JB |
2905 | } |
2906 | ||
a7a63acc | 2907 | ret = inc_block_group_ro(cache, 0); |
26ce2095 JB |
2908 | if (!ret) |
2909 | goto out; | |
7561551e QW |
2910 | if (ret == -ETXTBSY) |
2911 | goto unlock_out; | |
2912 | ||
2913 | /* | |
2914 | * Skip chunk allocation if the bg is SYSTEM, to avoid a system chunk | |
2915 | * allocation storm exhausting the system chunk array. Otherwise | |
2916 | * we still want to try our best to mark the block group read-only. | |
2917 | */ | |
2918 | if (!do_chunk_alloc && ret == -ENOSPC && | |
2919 | (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM)) | |
2920 | goto unlock_out; | |
2921 | ||
26ce2095 JB |
2922 | alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); |
2923 | ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); | |
2924 | if (ret < 0) | |
2925 | goto out; | |
b6a98021 NA |
2926 | /* |
2927 | * We have allocated a new chunk. We also need to activate that chunk to | |
2928 | * grant metadata tickets for a zoned filesystem. | |
2929 | */ | |
2930 | ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true); | |
2931 | if (ret < 0) | |
2932 | goto out; | |
2933 | ||
e11c0406 | 2934 | ret = inc_block_group_ro(cache, 0); |
195a49ea FM |
2935 | if (ret == -ETXTBSY) |
2936 | goto unlock_out; | |
26ce2095 JB |
2937 | out: |
2938 | if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { | |
349e120e | 2939 | alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags); |
26ce2095 JB |
2940 | mutex_lock(&fs_info->chunk_mutex); |
2941 | check_system_chunk(trans, alloc_flags); | |
2942 | mutex_unlock(&fs_info->chunk_mutex); | |
2943 | } | |
b12de528 | 2944 | unlock_out: |
26ce2095 JB |
2945 | mutex_unlock(&fs_info->ro_block_group_mutex); |
2946 | ||
2947 | btrfs_end_transaction(trans); | |
2948 | return ret; | |
2949 | } | |
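/*
 * Minimal usage sketch, error handling trimmed; this is the shape of what
 * a caller like relocation does, pairing the inc with a dec once it is
 * done with the read-only group:
 *
 *	ret = btrfs_inc_block_group_ro(cache, true);
 *	if (ret)
 *		return ret;
 *	... move extents out of the block group ...
 *	btrfs_dec_block_group_ro(cache);
 */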
2950 | ||
32da5386 | 2951 | void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) |
26ce2095 JB |
2952 | { |
2953 | struct btrfs_space_info *sinfo = cache->space_info; | |
2954 | u64 num_bytes; | |
2955 | ||
2956 | BUG_ON(!cache->ro); | |
2957 | ||
2958 | spin_lock(&sinfo->lock); | |
2959 | spin_lock(&cache->lock); | |
2960 | if (!--cache->ro) { | |
169e0da9 NA |
2961 | if (btrfs_is_zoned(cache->fs_info)) { |
2962 | /* Migrate zone_unusable bytes back */ | |
98173255 NA |
2963 | cache->zone_unusable = |
2964 | (cache->alloc_offset - cache->used) + | |
2965 | (cache->length - cache->zone_capacity); | |
169e0da9 NA |
2966 | sinfo->bytes_zone_unusable += cache->zone_unusable; |
2967 | sinfo->bytes_readonly -= cache->zone_unusable; | |
2968 | } | |
f9f28e5b NA |
2969 | num_bytes = cache->length - cache->reserved - |
2970 | cache->pinned - cache->bytes_super - | |
2971 | cache->zone_unusable - cache->used; | |
2972 | sinfo->bytes_readonly -= num_bytes; | |
26ce2095 JB |
2973 | list_del_init(&cache->ro_list); |
2974 | } | |
2975 | spin_unlock(&cache->lock); | |
2976 | spin_unlock(&sinfo->lock); | |
2977 | } | |
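/*
 * Worked example for the zoned branch above, hypothetical sizes: a block
 * group with length 256MiB, zone_capacity 224MiB, alloc_offset 100MiB and
 * used 80MiB migrates (100 - 80) + (256 - 224) = 52MiB back to
 * zone_unusable when it leaves RO mode: 20MiB freed below the write
 * pointer that a zoned device cannot reuse in place, plus the 32MiB the
 * zone can never hold.
 */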
77745c05 | 2978 | |
3be4d8ef QW |
2979 | static int update_block_group_item(struct btrfs_trans_handle *trans, |
2980 | struct btrfs_path *path, | |
2981 | struct btrfs_block_group *cache) | |
77745c05 JB |
2982 | { |
2983 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2984 | int ret; | |
dfe8aec4 | 2985 | struct btrfs_root *root = btrfs_block_group_root(fs_info); |
77745c05 JB |
2986 | unsigned long bi; |
2987 | struct extent_buffer *leaf; | |
bf38be65 | 2988 | struct btrfs_block_group_item bgi; |
b3470b5d | 2989 | struct btrfs_key key; |
7248e0ce QW |
2990 | u64 old_commit_used; |
2991 | u64 used; | |
2992 | ||
2993 | /* | |
2994 | * Block group item updates can be triggered outside of the commit | |
2995 | * transaction critical section, thus we need a consistent view of the | |
2996 | * used bytes. We cannot use cache->used directly outside of the spin | |
2997 | * lock, as it may change at any time. | |
2998 | */ | |
2999 | spin_lock(&cache->lock); | |
3000 | old_commit_used = cache->commit_used; | |
3001 | used = cache->used; | |
3002 | /* No change in used bytes, can safely skip it. */ | |
3003 | if (cache->commit_used == used) { | |
3004 | spin_unlock(&cache->lock); | |
3005 | return 0; | |
3006 | } | |
3007 | cache->commit_used = used; | |
3008 | spin_unlock(&cache->lock); | |
b3470b5d DS |
3009 | |
3010 | key.objectid = cache->start; | |
3011 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
3012 | key.offset = cache->length; | |
77745c05 | 3013 | |
3be4d8ef | 3014 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
77745c05 JB |
3015 | if (ret) { |
3016 | if (ret > 0) | |
3017 | ret = -ENOENT; | |
3018 | goto fail; | |
3019 | } | |
3020 | ||
3021 | leaf = path->nodes[0]; | |
3022 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
7248e0ce | 3023 | btrfs_set_stack_block_group_used(&bgi, used); |
de0dc456 | 3024 | btrfs_set_stack_block_group_chunk_objectid(&bgi, |
f7238e50 | 3025 | cache->global_root_id); |
de0dc456 | 3026 | btrfs_set_stack_block_group_flags(&bgi, cache->flags); |
bf38be65 | 3027 | write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); |
77745c05 JB |
3028 | btrfs_mark_buffer_dirty(leaf); |
3029 | fail: | |
3030 | btrfs_release_path(path); | |
7248e0ce QW |
3031 | /* We didn't update the block group item, need to revert @commit_used. */ |
3032 | if (ret < 0) { | |
3033 | spin_lock(&cache->lock); | |
3034 | cache->commit_used = old_commit_used; | |
3035 | spin_unlock(&cache->lock); | |
3036 | } | |
77745c05 JB |
3037 | return ret; |
3039 | } | |
3040 | ||
32da5386 | 3041 | static int cache_save_setup(struct btrfs_block_group *block_group, |
77745c05 JB |
3042 | struct btrfs_trans_handle *trans, |
3043 | struct btrfs_path *path) | |
3044 | { | |
3045 | struct btrfs_fs_info *fs_info = block_group->fs_info; | |
3046 | struct btrfs_root *root = fs_info->tree_root; | |
3047 | struct inode *inode = NULL; | |
3048 | struct extent_changeset *data_reserved = NULL; | |
3049 | u64 alloc_hint = 0; | |
3050 | int dcs = BTRFS_DC_ERROR; | |
0044ae11 | 3051 | u64 cache_size = 0; |
77745c05 JB |
3052 | int retries = 0; |
3053 | int ret = 0; | |
3054 | ||
af456a2c BB |
3055 | if (!btrfs_test_opt(fs_info, SPACE_CACHE)) |
3056 | return 0; | |
3057 | ||
77745c05 JB |
3058 | /* |
3059 | * If this block group is smaller than 100 megs don't bother caching the | |
3060 | * block group. | |
3061 | */ | |
b3470b5d | 3062 | if (block_group->length < (100 * SZ_1M)) { |
77745c05 JB |
3063 | spin_lock(&block_group->lock); |
3064 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
3065 | spin_unlock(&block_group->lock); | |
3066 | return 0; | |
3067 | } | |
3068 | ||
bf31f87f | 3069 | if (TRANS_ABORTED(trans)) |
77745c05 JB |
3070 | return 0; |
3071 | again: | |
3072 | inode = lookup_free_space_inode(block_group, path); | |
3073 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | |
3074 | ret = PTR_ERR(inode); | |
3075 | btrfs_release_path(path); | |
3076 | goto out; | |
3077 | } | |
3078 | ||
3079 | if (IS_ERR(inode)) { | |
3080 | BUG_ON(retries); | |
3081 | retries++; | |
3082 | ||
3083 | if (block_group->ro) | |
3084 | goto out_free; | |
3085 | ||
3086 | ret = create_free_space_inode(trans, block_group, path); | |
3087 | if (ret) | |
3088 | goto out_free; | |
3089 | goto again; | |
3090 | } | |
3091 | ||
3092 | /* | |
3093 | * We want to set the generation to 0, that way if anything goes wrong | |
3094 | * from here on out we know not to trust this cache when we load up next | |
3095 | * time. | |
3096 | */ | |
3097 | BTRFS_I(inode)->generation = 0; | |
9a56fcd1 | 3098 | ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); |
77745c05 JB |
3099 | if (ret) { |
3100 | /* | |
3101 | * So theoretically we could recover from this, simply set the | |
3102 | * super cache generation to 0 so we know to invalidate the | |
3103 | * cache, but then we'd have to keep track of the block groups | |
3104 | * that fail this way so we know we _have_ to reset this cache | |
3105 | * before the next commit or risk reading stale cache. So to | |
3106 | * limit our exposure to horrible edge cases, let's just abort the | |
3107 | * transaction, this only happens in really bad situations | |
3108 | * anyway. | |
3109 | */ | |
3110 | btrfs_abort_transaction(trans, ret); | |
3111 | goto out_put; | |
3112 | } | |
3113 | WARN_ON(ret); | |
3114 | ||
3115 | /* We've already set up this transaction, go ahead and exit */ | |
3116 | if (block_group->cache_generation == trans->transid && | |
3117 | i_size_read(inode)) { | |
3118 | dcs = BTRFS_DC_SETUP; | |
3119 | goto out_put; | |
3120 | } | |
3121 | ||
3122 | if (i_size_read(inode) > 0) { | |
3123 | ret = btrfs_check_trunc_cache_free_space(fs_info, | |
3124 | &fs_info->global_block_rsv); | |
3125 | if (ret) | |
3126 | goto out_put; | |
3127 | ||
3128 | ret = btrfs_truncate_free_space_cache(trans, NULL, inode); | |
3129 | if (ret) | |
3130 | goto out_put; | |
3131 | } | |
3132 | ||
3133 | spin_lock(&block_group->lock); | |
3134 | if (block_group->cached != BTRFS_CACHE_FINISHED || | |
3135 | !btrfs_test_opt(fs_info, SPACE_CACHE)) { | |
3136 | /* | |
3137 | * Don't bother trying to write stuff out _if_: | |
3138 | * a) we're not cached, | |
3139 | * b) we're mounted with the nospace_cache option, | |
3140 | * c) we're using the v2 space cache (FREE_SPACE_TREE). | |
3141 | */ | |
3142 | dcs = BTRFS_DC_WRITTEN; | |
3143 | spin_unlock(&block_group->lock); | |
3144 | goto out_put; | |
3145 | } | |
3146 | spin_unlock(&block_group->lock); | |
3147 | ||
3148 | /* | |
3149 | * We hit an ENOSPC when setting up the cache in this transaction, just | |
3150 | * skip doing the setup, we've already cleared the cache so we're safe. | |
3151 | */ | |
3152 | if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { | |
3153 | ret = -ENOSPC; | |
3154 | goto out_put; | |
3155 | } | |
3156 | ||
3157 | /* | |
3158 | * Try to preallocate enough space based on how big the block group is. | |
3159 | * Keep in mind this has to include any pinned space which could end up | |
3160 | * taking up quite a bit since it's not folded into the other space | |
3161 | * cache. | |
3162 | */ | |
0044ae11 QW |
3163 | cache_size = div_u64(block_group->length, SZ_256M); |
3164 | if (!cache_size) | |
3165 | cache_size = 1; | |
77745c05 | 3166 | |
0044ae11 QW |
3167 | cache_size *= 16; |
3168 | cache_size *= fs_info->sectorsize; | |
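	/*
	 * Worked example, assuming a 4KiB sector size: a 1GiB block group
	 * gives 1GiB / 256MiB = 4, then 4 * 16 = 64 sectors, so
	 * 64 * 4KiB = 256KiB is preallocated for the free space cache file.
	 */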
77745c05 | 3169 | |
36ea6f3e | 3170 | ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0, |
1daedb1d | 3171 | cache_size, false); |
77745c05 JB |
3172 | if (ret) |
3173 | goto out_put; | |
3174 | ||
0044ae11 QW |
3175 | ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size, |
3176 | cache_size, cache_size, | |
77745c05 JB |
3177 | &alloc_hint); |
3178 | /* | |
3179 | * Our cache requires contiguous chunks so that we don't modify a bunch | |
3180 | * of metadata or split extents when writing the cache out, which means | |
3181 | * we can enospc if we are heavily fragmented in addition to just normal | |
3182 | * out of space conditions. So if we hit this just skip setting up any | |
3183 | * other block groups for this transaction, maybe we'll unpin enough | |
3184 | * space the next time around. | |
3185 | */ | |
3186 | if (!ret) | |
3187 | dcs = BTRFS_DC_SETUP; | |
3188 | else if (ret == -ENOSPC) | |
3189 | set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); | |
3190 | ||
3191 | out_put: | |
3192 | iput(inode); | |
3193 | out_free: | |
3194 | btrfs_release_path(path); | |
3195 | out: | |
3196 | spin_lock(&block_group->lock); | |
3197 | if (!ret && dcs == BTRFS_DC_SETUP) | |
3198 | block_group->cache_generation = trans->transid; | |
3199 | block_group->disk_cache_state = dcs; | |
3200 | spin_unlock(&block_group->lock); | |
3201 | ||
3202 | extent_changeset_free(data_reserved); | |
3203 | return ret; | |
3204 | } | |
3205 | ||
3206 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) | |
3207 | { | |
3208 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 3209 | struct btrfs_block_group *cache, *tmp; |
77745c05 JB |
3210 | struct btrfs_transaction *cur_trans = trans->transaction; |
3211 | struct btrfs_path *path; | |
3212 | ||
3213 | if (list_empty(&cur_trans->dirty_bgs) || | |
3214 | !btrfs_test_opt(fs_info, SPACE_CACHE)) | |
3215 | return 0; | |
3216 | ||
3217 | path = btrfs_alloc_path(); | |
3218 | if (!path) | |
3219 | return -ENOMEM; | |
3220 | ||
3221 | /* Could add new block groups, use _safe just in case */ | |
3222 | list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, | |
3223 | dirty_list) { | |
3224 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | |
3225 | cache_save_setup(cache, trans, path); | |
3226 | } | |
3227 | ||
3228 | btrfs_free_path(path); | |
3229 | return 0; | |
3230 | } | |
3231 | ||
3232 | /* | |
3233 | * Transaction commit does final block group cache writeback during a critical | |
3234 | * section where nothing is allowed to change the FS. This is required in | |
3235 | * order for the cache to actually match the block group, but can introduce a | |
3236 | * lot of latency into the commit. | |
3237 | * | |
3238 | * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. | |
3239 | * There's a chance we'll have to redo some of it if the block group changes | |
3240 | * again during the commit, but it greatly reduces the commit latency by | |
3241 | * getting rid of the easy block groups while we're still allowing others to | |
3242 | * join the commit. | |
3243 | */ | |
3244 | int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) | |
3245 | { | |
3246 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 3247 | struct btrfs_block_group *cache; |
77745c05 JB |
3248 | struct btrfs_transaction *cur_trans = trans->transaction; |
3249 | int ret = 0; | |
3250 | int should_put; | |
3251 | struct btrfs_path *path = NULL; | |
3252 | LIST_HEAD(dirty); | |
3253 | struct list_head *io = &cur_trans->io_bgs; | |
77745c05 JB |
3254 | int loops = 0; |
3255 | ||
3256 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3257 | if (list_empty(&cur_trans->dirty_bgs)) { | |
3258 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3259 | return 0; | |
3260 | } | |
3261 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | |
3262 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3263 | ||
3264 | again: | |
3265 | /* Make sure all the block groups on our dirty list actually exist */ | |
3266 | btrfs_create_pending_block_groups(trans); | |
3267 | ||
3268 | if (!path) { | |
3269 | path = btrfs_alloc_path(); | |
938fcbfb JB |
3270 | if (!path) { |
3271 | ret = -ENOMEM; | |
3272 | goto out; | |
3273 | } | |
77745c05 JB |
3274 | } |
3275 | ||
3276 | /* | |
3277 | * cache_write_mutex is here only to save us from balance or automatic | |
3278 | * removal of empty block groups deleting this block group while we are | |
3279 | * writing out the cache | |
3280 | */ | |
3281 | mutex_lock(&trans->transaction->cache_write_mutex); | |
3282 | while (!list_empty(&dirty)) { | |
3283 | bool drop_reserve = true; | |
3284 | ||
32da5386 | 3285 | cache = list_first_entry(&dirty, struct btrfs_block_group, |
77745c05 JB |
3286 | dirty_list); |
3287 | /* | |
3288 | * This can happen if something re-dirties a block group that | |
3289 | * is already under IO. Just wait for it to finish and then do | |
3290 | * it all again | |
3291 | */ | |
3292 | if (!list_empty(&cache->io_list)) { | |
3293 | list_del_init(&cache->io_list); | |
3294 | btrfs_wait_cache_io(trans, cache, path); | |
3295 | btrfs_put_block_group(cache); | |
3296 | } | |
3297 | ||
3298 | ||
3299 | /* | |
3300 | * btrfs_wait_cache_io uses the cache->dirty_list to decide if | |
3301 | * it should update the cache_state. Don't delete until after | |
3302 | * we wait. | |
3303 | * | |
3304 | * Since we're not running in the commit critical section | |
3305 | * we need the dirty_bgs_lock to protect from update_block_group | |
3306 | */ | |
3307 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3308 | list_del_init(&cache->dirty_list); | |
3309 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3310 | ||
3311 | should_put = 1; | |
3312 | ||
3313 | cache_save_setup(cache, trans, path); | |
3314 | ||
3315 | if (cache->disk_cache_state == BTRFS_DC_SETUP) { | |
3316 | cache->io_ctl.inode = NULL; | |
3317 | ret = btrfs_write_out_cache(trans, cache, path); | |
3318 | if (ret == 0 && cache->io_ctl.inode) { | |
77745c05 JB |
3319 | should_put = 0; |
3320 | ||
3321 | /* | |
3322 | * The cache_write_mutex is protecting the | |
3323 | * io_list, also refer to the definition of | |
3324 | * btrfs_transaction::io_bgs for more details | |
3325 | */ | |
3326 | list_add_tail(&cache->io_list, io); | |
3327 | } else { | |
3328 | /* | |
3329 | * If we failed to write the cache, the | |
3330 | * generation will be bad and life goes on | |
3331 | */ | |
3332 | ret = 0; | |
3333 | } | |
3334 | } | |
3335 | if (!ret) { | |
3be4d8ef | 3336 | ret = update_block_group_item(trans, path, cache); |
77745c05 JB |
3337 | /* |
3338 | * Our block group might still be attached to the list | |
3339 | * of new block groups in the transaction handle of some | |
3340 | * other task (struct btrfs_trans_handle->new_bgs). This | |
3341 | * means its block group item isn't yet in the extent | |
3342 | * tree. If this happens ignore the error, as we will | |
3343 | * try again later in the critical section of the | |
3344 | * transaction commit. | |
3345 | */ | |
3346 | if (ret == -ENOENT) { | |
3347 | ret = 0; | |
3348 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3349 | if (list_empty(&cache->dirty_list)) { | |
3350 | list_add_tail(&cache->dirty_list, | |
3351 | &cur_trans->dirty_bgs); | |
3352 | btrfs_get_block_group(cache); | |
3353 | drop_reserve = false; | |
3354 | } | |
3355 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3356 | } else if (ret) { | |
3357 | btrfs_abort_transaction(trans, ret); | |
3358 | } | |
3359 | } | |
3360 | ||
3361 | /* If it's not on the io list, we need to put the block group */ | |
3362 | if (should_put) | |
3363 | btrfs_put_block_group(cache); | |
3364 | if (drop_reserve) | |
3365 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
77745c05 JB |
3366 | /* |
3367 | * Avoid blocking other tasks for too long. It might even save | |
3368 | * us from writing caches for block groups that are going to be | |
3369 | * removed. | |
3370 | */ | |
3371 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
938fcbfb JB |
3372 | if (ret) |
3373 | goto out; | |
77745c05 JB |
3374 | mutex_lock(&trans->transaction->cache_write_mutex); |
3375 | } | |
3376 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
3377 | ||
3378 | /* | |
3379 | * Go through delayed refs for all the stuff we've just kicked off | |
3380 | * and then loop back (just once) | |
3381 | */ | |
34d1eb0e JB |
3382 | if (!ret) |
3383 | ret = btrfs_run_delayed_refs(trans, 0); | |
77745c05 JB |
3384 | if (!ret && loops == 0) { |
3385 | loops++; | |
3386 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3387 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | |
3388 | /* | |
3389 | * dirty_bgs_lock protects us from concurrent block group | |
3390 | * deletes too (not just cache_write_mutex). | |
3391 | */ | |
3392 | if (!list_empty(&dirty)) { | |
3393 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3394 | goto again; | |
3395 | } | |
3396 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
938fcbfb JB |
3397 | } |
3398 | out: | |
3399 | if (ret < 0) { | |
3400 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3401 | list_splice_init(&dirty, &cur_trans->dirty_bgs); | |
3402 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
77745c05 JB |
3403 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); |
3404 | } | |
3405 | ||
3406 | btrfs_free_path(path); | |
3407 | return ret; | |
3408 | } | |
3409 | ||
3410 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) | |
3411 | { | |
3412 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 3413 | struct btrfs_block_group *cache; |
77745c05 JB |
3414 | struct btrfs_transaction *cur_trans = trans->transaction; |
3415 | int ret = 0; | |
3416 | int should_put; | |
3417 | struct btrfs_path *path; | |
3418 | struct list_head *io = &cur_trans->io_bgs; | |
77745c05 JB |
3419 | |
3420 | path = btrfs_alloc_path(); | |
3421 | if (!path) | |
3422 | return -ENOMEM; | |
3423 | ||
3424 | /* | |
3425 | * Even though we are in the critical section of the transaction commit, | |
3426 | * we can still have concurrent tasks adding elements to this | |
3427 | * transaction's list of dirty block groups. These tasks correspond to | |
3428 | * endio free space workers started when writeback finishes for a | |
3429 | * space cache, which run inode.c:btrfs_finish_ordered_io(), and can | |
3430 | * allocate new block groups as a result of COWing nodes of the root | |
3431 | * tree when updating the free space inode. The writeback for the space | |
3432 | * caches is triggered by an earlier call to | |
3433 | * btrfs_start_dirty_block_groups() and iterations of the following | |
3434 | * loop. | |
3435 | * Also we want to do the cache_save_setup first and then run the | |
3436 | * delayed refs to make sure we have the best chance at doing this all | |
3437 | * in one shot. | |
3438 | */ | |
3439 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3440 | while (!list_empty(&cur_trans->dirty_bgs)) { | |
3441 | cache = list_first_entry(&cur_trans->dirty_bgs, | |
32da5386 | 3442 | struct btrfs_block_group, |
77745c05 JB |
3443 | dirty_list); |
3444 | ||
3445 | /* | |
3446 | * This can happen if cache_save_setup re-dirties a block group | |
3447 | * that is already under IO. Just wait for it to finish and | |
3448 | * then do it all again | |
3449 | */ | |
3450 | if (!list_empty(&cache->io_list)) { | |
3451 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3452 | list_del_init(&cache->io_list); | |
3453 | btrfs_wait_cache_io(trans, cache, path); | |
3454 | btrfs_put_block_group(cache); | |
3455 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3456 | } | |
3457 | ||
3458 | /* | |
3459 | * Don't remove from the dirty list until after we've waited on | |
3460 | * any pending IO | |
3461 | */ | |
3462 | list_del_init(&cache->dirty_list); | |
3463 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3464 | should_put = 1; | |
3465 | ||
3466 | cache_save_setup(cache, trans, path); | |
3467 | ||
3468 | if (!ret) | |
3469 | ret = btrfs_run_delayed_refs(trans, | |
3470 | (unsigned long) -1); | |
3471 | ||
3472 | if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { | |
3473 | cache->io_ctl.inode = NULL; | |
3474 | ret = btrfs_write_out_cache(trans, cache, path); | |
3475 | if (ret == 0 && cache->io_ctl.inode) { | |
77745c05 JB |
3476 | should_put = 0; |
3477 | list_add_tail(&cache->io_list, io); | |
3478 | } else { | |
3479 | /* | |
3480 | * If we failed to write the cache, the | |
3481 | * generation will be bad and life goes on | |
3482 | */ | |
3483 | ret = 0; | |
3484 | } | |
3485 | } | |
3486 | if (!ret) { | |
3be4d8ef | 3487 | ret = update_block_group_item(trans, path, cache); |
77745c05 JB |
3488 | /* |
3489 | * One of the free space endio workers might have | |
3490 | * created a new block group while updating a free space | |
3491 | * cache's inode (at inode.c:btrfs_finish_ordered_io()) | |
3492 | * and hasn't released its transaction handle yet, in | |
3493 | * which case the new block group is still attached to | |
3494 | * its transaction handle and its creation has not | |
3495 | * finished yet (no block group item in the extent tree | |
3496 | * yet, etc). If this is the case, wait for all free | |
3497 | * space endio workers to finish and retry. This is a | |
260db43c | 3498 | * very rare case so no need for a more efficient and |
77745c05 JB |
3499 | * complex approach. |
3500 | */ | |
3501 | if (ret == -ENOENT) { | |
3502 | wait_event(cur_trans->writer_wait, | |
3503 | atomic_read(&cur_trans->num_writers) == 1); | |
3be4d8ef | 3504 | ret = update_block_group_item(trans, path, cache); |
77745c05 JB |
3505 | } |
3506 | if (ret) | |
3507 | btrfs_abort_transaction(trans, ret); | |
3508 | } | |
3509 | ||
3510 | /* If it's not on the io list, we need to put the block group */ | |
3511 | if (should_put) | |
3512 | btrfs_put_block_group(cache); | |
3513 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
3514 | spin_lock(&cur_trans->dirty_bgs_lock); | |
3515 | } | |
3516 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
3517 | ||
3518 | /* | |
3519 | * Refer to the definition of the io_bgs member for details on why it's | |
3520 | * safe to use it without any locking. | |
3521 | */ | |
3522 | while (!list_empty(io)) { | |
32da5386 | 3523 | cache = list_first_entry(io, struct btrfs_block_group, |
77745c05 JB |
3524 | io_list); |
3525 | list_del_init(&cache->io_list); | |
3526 | btrfs_wait_cache_io(trans, cache, path); | |
3527 | btrfs_put_block_group(cache); | |
3528 | } | |
3529 | ||
3530 | btrfs_free_path(path); | |
3531 | return ret; | |
3532 | } | |
606d1bf1 JB |
3533 | |
3534 | int btrfs_update_block_group(struct btrfs_trans_handle *trans, | |
11b66fa6 | 3535 | u64 bytenr, u64 num_bytes, bool alloc) |
606d1bf1 JB |
3536 | { |
3537 | struct btrfs_fs_info *info = trans->fs_info; | |
32da5386 | 3538 | struct btrfs_block_group *cache = NULL; |
606d1bf1 JB |
3539 | u64 total = num_bytes; |
3540 | u64 old_val; | |
3541 | u64 byte_in_group; | |
3542 | int factor; | |
3543 | int ret = 0; | |
3544 | ||
3545 | /* Block accounting for super block */ | |
3546 | spin_lock(&info->delalloc_root_lock); | |
3547 | old_val = btrfs_super_bytes_used(info->super_copy); | |
3548 | if (alloc) | |
3549 | old_val += num_bytes; | |
3550 | else | |
3551 | old_val -= num_bytes; | |
3552 | btrfs_set_super_bytes_used(info->super_copy, old_val); | |
3553 | spin_unlock(&info->delalloc_root_lock); | |
3554 | ||
3555 | while (total) { | |
df384da5 | 3556 | struct btrfs_space_info *space_info; |
efbf35a1 | 3557 | bool reclaim = false; |
ac2f1e63 | 3558 | |
606d1bf1 JB |
3559 | cache = btrfs_lookup_block_group(info, bytenr); |
3560 | if (!cache) { | |
3561 | ret = -ENOENT; | |
3562 | break; | |
3563 | } | |
df384da5 | 3564 | space_info = cache->space_info; |
606d1bf1 JB |
3565 | factor = btrfs_bg_type_to_factor(cache->flags); |
3566 | ||
3567 | /* | |
3568 | * If this block group has free space cache written out, we | |
3569 | * need to make sure to load it if we are removing space. This | |
3570 | * is because we need the unpinning stage to actually add the | |
3571 | * space back to the block group, otherwise we will leak space. | |
3572 | */ | |
32da5386 | 3573 | if (!alloc && !btrfs_block_group_done(cache)) |
ced8ecf0 | 3574 | btrfs_cache_block_group(cache, true); |
606d1bf1 | 3575 | |
b3470b5d DS |
3576 | byte_in_group = bytenr - cache->start; |
3577 | WARN_ON(byte_in_group > cache->length); | |
606d1bf1 | 3578 | |
df384da5 | 3579 | spin_lock(&space_info->lock); |
606d1bf1 JB |
3580 | spin_lock(&cache->lock); |
3581 | ||
3582 | if (btrfs_test_opt(info, SPACE_CACHE) && | |
3583 | cache->disk_cache_state < BTRFS_DC_CLEAR) | |
3584 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
3585 | ||
bf38be65 | 3586 | old_val = cache->used; |
b3470b5d | 3587 | num_bytes = min(total, cache->length - byte_in_group); |
606d1bf1 JB |
3588 | if (alloc) { |
3589 | old_val += num_bytes; | |
bf38be65 | 3590 | cache->used = old_val; |
606d1bf1 | 3591 | cache->reserved -= num_bytes; |
df384da5 JB |
3592 | space_info->bytes_reserved -= num_bytes; |
3593 | space_info->bytes_used += num_bytes; | |
3594 | space_info->disk_used += num_bytes * factor; | |
606d1bf1 | 3595 | spin_unlock(&cache->lock); |
df384da5 | 3596 | spin_unlock(&space_info->lock); |
606d1bf1 JB |
3597 | } else { |
3598 | old_val -= num_bytes; | |
bf38be65 | 3599 | cache->used = old_val; |
606d1bf1 | 3600 | cache->pinned += num_bytes; |
df384da5 JB |
3601 | btrfs_space_info_update_bytes_pinned(info, space_info, |
3602 | num_bytes); | |
3603 | space_info->bytes_used -= num_bytes; | |
3604 | space_info->disk_used -= num_bytes * factor; | |
ac2f1e63 JB |
3605 | |
3606 | reclaim = should_reclaim_block_group(cache, num_bytes); | |
52bb7a21 | 3607 | |
606d1bf1 | 3608 | spin_unlock(&cache->lock); |
df384da5 | 3609 | spin_unlock(&space_info->lock); |
606d1bf1 | 3610 | |
fe1a598c DS |
3611 | set_extent_bit(&trans->transaction->pinned_extents, |
3612 | bytenr, bytenr + num_bytes - 1, | |
1d126800 | 3613 | EXTENT_DIRTY, NULL); |
606d1bf1 JB |
3614 | } |
3615 | ||
3616 | spin_lock(&trans->transaction->dirty_bgs_lock); | |
3617 | if (list_empty(&cache->dirty_list)) { | |
3618 | list_add_tail(&cache->dirty_list, | |
3619 | &trans->transaction->dirty_bgs); | |
3620 | trans->delayed_ref_updates++; | |
3621 | btrfs_get_block_group(cache); | |
3622 | } | |
3623 | spin_unlock(&trans->transaction->dirty_bgs_lock); | |
3624 | ||
3625 | /* | |
3626 | * No longer have used bytes in this block group, queue it for | |
3627 | * deletion. We do this after adding the block group to the | |
3628 | * dirty list to avoid races between cleaner kthread and space | |
3629 | * cache writeout. | |
3630 | */ | |
6e80d4f8 DZ |
3631 | if (!alloc && old_val == 0) { |
3632 | if (!btrfs_test_opt(info, DISCARD_ASYNC)) | |
3633 | btrfs_mark_bg_unused(cache); | |
ac2f1e63 JB |
3634 | } else if (!alloc && reclaim) { |
3635 | btrfs_mark_bg_to_reclaim(cache); | |
6e80d4f8 | 3636 | } |
606d1bf1 JB |
3637 | |
3638 | btrfs_put_block_group(cache); | |
3639 | total -= num_bytes; | |
3640 | bytenr += num_bytes; | |
3641 | } | |
3642 | ||
3643 | /* Modified block groups are accounted for in the delayed_refs_rsv. */ | |
3644 | btrfs_update_delayed_refs_rsv(trans); | |
3645 | return ret; | |
3646 | } | |
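/*
 * Illustrative caller, not from this file: the extent allocator accounts a
 * freshly allocated extent with something like
 *
 *	ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, true);
 *
 * while the extent freeing path passes alloc == false, which is the branch
 * above that pins the bytes in the transaction's pinned_extents tree.
 */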
3647 | ||
43dd529a DS |
3648 | /* |
3649 | * Update the block_group and space info counters. | |
3650 | * | |
606d1bf1 JB |
3651 | * @cache: The cache we are manipulating |
3652 | * @ram_bytes: The number of bytes of file content; the same as | |
3653 | * @num_bytes except on the compression path. | |
3654 | * @num_bytes: The number of bytes in question | |
3655 | * @delalloc: The blocks are allocated for the delalloc write | |
3656 | * | |
3657 | * This is called by the allocator when it reserves space. If this is a | |
3658 | * reservation and the block group has become read only we cannot make the | |
3659 | * reservation and return -EAGAIN, otherwise this function always succeeds. | |
3660 | */ | |
32da5386 | 3661 | int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, |
52bb7a21 BB |
3662 | u64 ram_bytes, u64 num_bytes, int delalloc, |
3663 | bool force_wrong_size_class) | |
606d1bf1 JB |
3664 | { |
3665 | struct btrfs_space_info *space_info = cache->space_info; | |
52bb7a21 | 3666 | enum btrfs_block_group_size_class size_class; |
606d1bf1 JB |
3667 | int ret = 0; |
3668 | ||
3669 | spin_lock(&space_info->lock); | |
3670 | spin_lock(&cache->lock); | |
3671 | if (cache->ro) { | |
3672 | ret = -EAGAIN; | |
52bb7a21 BB |
3673 | goto out; |
3674 | } | |
99ffb43e | 3675 | |
cb0922f2 | 3676 | if (btrfs_block_group_should_use_size_class(cache)) { |
52bb7a21 BB |
3677 | size_class = btrfs_calc_block_group_size_class(num_bytes); |
3678 | ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class); | |
3679 | if (ret) | |
3680 | goto out; | |
606d1bf1 | 3681 | } |
52bb7a21 BB |
3682 | cache->reserved += num_bytes; |
3683 | space_info->bytes_reserved += num_bytes; | |
3684 | trace_btrfs_space_reservation(cache->fs_info, "space_info", | |
3685 | space_info->flags, num_bytes, 1); | |
3686 | btrfs_space_info_update_bytes_may_use(cache->fs_info, | |
3687 | space_info, -ram_bytes); | |
3688 | if (delalloc) | |
3689 | cache->delalloc_bytes += num_bytes; | |
3690 | ||
3691 | /* | |
3692 | * Compression can use less space than we reserved, so wake tickets if | |
3693 | * that happens. | |
3694 | */ | |
3695 | if (num_bytes < ram_bytes) | |
3696 | btrfs_try_granting_tickets(cache->fs_info, space_info); | |
3697 | out: | |
606d1bf1 JB |
3698 | spin_unlock(&cache->lock); |
3699 | spin_unlock(&space_info->lock); | |
3700 | return ret; | |
3701 | } | |
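/*
 * Example of the @ram_bytes / @num_bytes split, hypothetical numbers: if
 * 128KiB of file data compresses to a 16KiB extent, the caller reserves
 * with ram_bytes == 128KiB and num_bytes == 16KiB. num_bytes < ram_bytes
 * then triggers the ticket wakeup above, handing the 112KiB that was
 * reserved but never written back to the waiters.
 */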
3702 | ||
43dd529a DS |
3703 | /* |
3704 | * Update the block_group and space info counters. | |
3705 | * | |
606d1bf1 JB |
3706 | * @cache: The cache we are manipulating |
3707 | * @num_bytes: The number of bytes in question | |
3708 | * @delalloc: The blocks are allocated for the delalloc write | |
3709 | * | |
3710 | * This is called by somebody who is freeing space that was never actually used | |
3711 | * on disk. For example, if you reserve some space for a new leaf in transaction | |
3712 | * A and free that leaf before transaction A commits, this is called to clear | |
3713 | * the reservation. | |
3714 | */ | |
32da5386 | 3715 | void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, |
606d1bf1 JB |
3716 | u64 num_bytes, int delalloc) |
3717 | { | |
3718 | struct btrfs_space_info *space_info = cache->space_info; | |
3719 | ||
3720 | spin_lock(&space_info->lock); | |
3721 | spin_lock(&cache->lock); | |
3722 | if (cache->ro) | |
3723 | space_info->bytes_readonly += num_bytes; | |
3724 | cache->reserved -= num_bytes; | |
3725 | space_info->bytes_reserved -= num_bytes; | |
3726 | space_info->max_extent_size = 0; | |
3727 | ||
3728 | if (delalloc) | |
3729 | cache->delalloc_bytes -= num_bytes; | |
3730 | spin_unlock(&cache->lock); | |
3308234a JB |
3731 | |
3732 | btrfs_try_granting_tickets(cache->fs_info, space_info); | |
606d1bf1 JB |
3733 | spin_unlock(&space_info->lock); |
3734 | } | |
07730d87 JB |
3735 | |
3736 | static void force_metadata_allocation(struct btrfs_fs_info *info) | |
3737 | { | |
3738 | struct list_head *head = &info->space_info; | |
3739 | struct btrfs_space_info *found; | |
3740 | ||
72804905 | 3741 | list_for_each_entry(found, head, list) { |
07730d87 JB |
3742 | if (found->flags & BTRFS_BLOCK_GROUP_METADATA) |
3743 | found->force_alloc = CHUNK_ALLOC_FORCE; | |
3744 | } | |
07730d87 JB |
3745 | } |
3746 | ||
3747 | static int should_alloc_chunk(struct btrfs_fs_info *fs_info, | |
3748 | struct btrfs_space_info *sinfo, int force) | |
3749 | { | |
3750 | u64 bytes_used = btrfs_space_info_used(sinfo, false); | |
3751 | u64 thresh; | |
3752 | ||
3753 | if (force == CHUNK_ALLOC_FORCE) | |
3754 | return 1; | |
3755 | ||
3756 | /* | |
3757 | * in limited mode, we want to have some free space up to | |
3758 | * about 1% of the FS size. | |
3759 | */ | |
3760 | if (force == CHUNK_ALLOC_LIMITED) { | |
3761 | thresh = btrfs_super_total_bytes(fs_info->super_copy); | |
428c8e03 | 3762 | thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1)); |
07730d87 JB |
3763 | |
3764 | if (sinfo->total_bytes - bytes_used < thresh) | |
3765 | return 1; | |
3766 | } | |
3767 | ||
428c8e03 | 3768 | if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80)) |
07730d87 JB |
3769 | return 0; |
3770 | return 1; | |
3771 | } | |
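/*
 * Worked example, hypothetical sizes: on a 1TiB filesystem whose data
 * space_info has total_bytes == 10GiB and bytes_used == 9.5GiB, limited
 * mode computes thresh = max(64MiB, 1% of 1TiB) ~= 10.2GiB; the 0.5GiB of
 * unallocated room is below that, so we allocate. Without limited mode the
 * 80% rule applies instead: 9.5GiB + 2MiB is not under 8GiB, so we
 * allocate in that case too.
 */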
3772 | ||
3773 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) | |
3774 | { | |
3775 | u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); | |
3776 | ||
3777 | return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); | |
3778 | } | |
3779 | ||
820c363b | 3780 | static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags) |
79bd3712 FM |
3781 | { |
3782 | struct btrfs_block_group *bg; | |
3783 | int ret; | |
3784 | ||
3785 | /* | |
3786 | * Check if we have enough space in the system space info because we | |
3787 | * will need to update device items in the chunk btree and insert a new | |
3788 | * chunk item in the chunk btree as well. This will allocate a new | |
3789 | * system block group if needed. | |
3790 | */ | |
3791 | check_system_chunk(trans, flags); | |
3792 | ||
f6f39f7a | 3793 | bg = btrfs_create_chunk(trans, flags); |
79bd3712 FM |
3794 | if (IS_ERR(bg)) { |
3795 | ret = PTR_ERR(bg); | |
3796 | goto out; | |
3797 | } | |
3798 | ||
79bd3712 FM |
3799 | ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); |
3800 | /* | |
3801 | * Normally we are not expected to fail with -ENOSPC here, since we have | |
3802 | * previously reserved space in the system space_info and allocated one | |
ecd84d54 | 3803 | * new system chunk if necessary. However there are three exceptions: |
79bd3712 FM |
3804 | * |
3805 | * 1) We may have enough free space in the system space_info but all the | |
3806 | * existing system block groups have a profile which can not be used | |
3807 | * for extent allocation. | |
3808 | * | |
3809 | * This happens when mounting in degraded mode. For example we have a | |
3810 | * RAID1 filesystem with 2 devices, lose one device and mount the fs | |
3811 | * using the other device in degraded mode. If we then allocate a chunk, | |
3812 | * we may have enough free space in the existing system space_info, but | |
3813 | * none of the block groups can be used for extent allocation since they | |
3814 | * have a RAID1 profile, and because we are in degraded mode with a | |
3815 | * single device, we are forced to allocate a new system chunk with a | |
3816 | * SINGLE profile. Making check_system_chunk() iterate over all system | |
3817 | * block groups and check if they have a usable profile and enough space | |
3818 | * can be slow on very large filesystems, so we tolerate the -ENOSPC and | |
3819 | * try again after forcing allocation of a new system chunk. Like this | |
3820 | * we avoid paying the cost of that search in normal circumstances, when | |
3821 | * we were not mounted in degraded mode; | |
3822 | * | |
3823 | * 2) We had enough free space in the system space_info, and one suitable | |
3824 | * block group to allocate from when we called check_system_chunk() | |
3825 | * above. However right after we called it, the only system block group | |
3826 | * with enough free space got turned into RO mode by a running scrub, | |
3827 | * and in this case we have to allocate a new one and retry. We only | |
3828 | * need do this allocate and retry once, since we have a transaction | |
ecd84d54 FM |
3829 | * handle and scrub uses the commit root to search for block groups; |
3830 | * | |
3831 | * 3) We had one system block group with enough free space when we called | |
3832 | * check_system_chunk(), but after that, right before we tried to | |
3833 | * allocate the last extent buffer we needed, a discard operation came | |
3834 | * in and it temporarily removed the last free space entry from the | |
3835 | * block group (discard removes a free space entry, discards it, and | |
3836 | * then adds back the entry to the block group cache). | |
79bd3712 FM |
3837 | */ |
3838 | if (ret == -ENOSPC) { | |
3839 | const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info); | |
3840 | struct btrfs_block_group *sys_bg; | |
3841 | ||
f6f39f7a | 3842 | sys_bg = btrfs_create_chunk(trans, sys_flags); |
79bd3712 FM |
3843 | if (IS_ERR(sys_bg)) { |
3844 | ret = PTR_ERR(sys_bg); | |
3845 | btrfs_abort_transaction(trans, ret); | |
3846 | goto out; | |
3847 | } | |
3848 | ||
3849 | ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg); | |
3850 | if (ret) { | |
3851 | btrfs_abort_transaction(trans, ret); | |
3852 | goto out; | |
3853 | } | |
3854 | ||
3855 | ret = btrfs_chunk_alloc_add_chunk_item(trans, bg); | |
3856 | if (ret) { | |
3857 | btrfs_abort_transaction(trans, ret); | |
3858 | goto out; | |
3859 | } | |
3860 | } else if (ret) { | |
3861 | btrfs_abort_transaction(trans, ret); | |
3862 | goto out; | |
3863 | } | |
3864 | out: | |
3865 | btrfs_trans_release_chunk_metadata(trans); | |
3866 | ||
820c363b NA |
3867 | if (ret) |
3868 | return ERR_PTR(ret); | |
3869 | ||
3870 | btrfs_get_block_group(bg); | |
3871 | return bg; | |
79bd3712 FM |
3872 | } |
3873 | ||
07730d87 | 3874 | /* |
79bd3712 FM |
3875 | * Chunk allocation is done in 2 phases: |
3876 | * | |
3877 | * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for | |
3878 | * the chunk, the chunk mapping, create its block group and add the items | |
3879 | * that belong in the chunk btree to it - more specifically, we need to | |
3880 | * update device items in the chunk btree and add a new chunk item to it. | |
3881 | * | |
3882 | * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block | |
3883 | * group item to the extent btree and the device extent items to the devices | |
3884 | * btree. | |
3885 | * | |
3886 | * This is done to prevent deadlocks. For example when COWing a node from the | |
3887 | * extent btree we are holding a write lock on the node's parent and if we | |
3888 | * trigger chunk allocation and attempt to insert the new block group item | |
3889 | * in the extent btree right away, we could deadlock because the path for the | |
3890 | * insertion can include that parent node. At first glance it seems impossible | |
3891 | * to trigger chunk allocation after starting a transaction since tasks should | |
3892 | * reserve enough transaction units (metadata space), however while that is true | |
3893 | * most of the time, chunk allocation may still be triggered for several reasons: | |
3894 | * | |
3895 | * 1) When reserving metadata, we check if there is enough free space in the | |
3896 | * metadata space_info and therefore don't trigger allocation of a new chunk. | |
3897 | * However later when the task actually tries to COW an extent buffer from | |
3898 | * the extent btree or from the device btree for example, it is forced to | |
3899 | * allocate a new block group (chunk) because the only one that had enough | |
3900 | * free space was just turned to RO mode by a running scrub for example (or | |
3901 | * device replace, block group reclaim thread, etc), so we can not use it | |
3902 | * for allocating an extent and end up being forced to allocate a new one; | |
3903 | * | |
3904 | * 2) Because we only check that the metadata space_info has enough free bytes, | |
3905 | * we end up not allocating a new metadata chunk in that case. However if | |
3906 | * the filesystem was mounted in degraded mode, none of the existing block | |
3907 | * groups might be suitable for extent allocation due to their incompatible | |
3908 | * profile (for e.g. mounting a 2 devices filesystem, where all block groups | |
3909 | * use a RAID1 profile, in degraded mode using a single device). In this case | |
3910 | * when the task attempts to COW some extent buffer of the extent btree for | |
3911 | * example, it will trigger allocation of a new metadata block group with a | |
3912 | * suitable profile (SINGLE profile in the example of the degraded mount of | |
3913 | * the RAID1 filesystem); | |
3914 | * | |
3915 | * 3) The task has reserved enough transaction units / metadata space, but when | |
3916 | * it attempts to COW an extent buffer from the extent or device btree for | |
3917 | * example, it does not find any free extent in any metadata block group, | |
3918 | * therefore forced to try to allocate a new metadata block group. | |
3919 | * This is because some other task allocated all available extents in the | |
3920 | * meanwhile - this typically happens with tasks that don't reserve space | |
3921 | * properly, either intentionally or as a bug. One example where this is | |
3922 | * done intentionally is fsync, as it does not reserve any transaction units | |
3923 | * and ends up allocating a variable number of metadata extents for log | |
ecd84d54 FM |
3924 | * tree extent buffers; |
3925 | * | |
3926 | * 4) The task has reserved enough transaction units / metadata space, but right | |
3927 | * before it tries to allocate the last extent buffer it needs, a discard | |
3928 | * operation comes in and, temporarily, removes the last free space entry from | |
3929 | * the only metadata block group that had free space (discard starts by | |
3930 | * removing a free space entry from a block group, then does the discard | |
3931 | * operation and, once it's done, it adds back the free space entry to the | |
3932 | * block group). | |
79bd3712 FM |
3933 | * |
3934 | * We also need this 2 phases setup when adding a device to a filesystem with | |
3935 | * a seed device - we must create new metadata and system chunks without adding | |
3936 | * any of the block group items to the chunk, extent and device btrees. If we | |
3937 | * did not do it this way, we would get ENOSPC when attempting to update those | |
3938 | * btrees, since all the chunks from the seed device are read-only. | |
3939 | * | |
3940 | * Phase 1 does the updates and insertions to the chunk btree because if we had | |
3941 | * it done in phase 2 and have a thundering herd of tasks allocating chunks in | |
3942 | * parallel, we risk having too many system chunks allocated by many tasks if | |
3943 | * many tasks reach phase 1 without the previous ones completing phase 2. In the | |
3944 | * extreme case this leads to exhaustion of the system chunk array in the | |
3945 | * superblock. This is easier to trigger if using a btree node/leaf size of 64K | |
3946 | * and with RAID filesystems (so we have more device items in the chunk btree). | |
3947 | * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of | |
3948 | * the system chunk array due to concurrent allocations") provides more details. | |
3949 | * | |
2bb2e00e FM |
3950 | * Allocation of system chunks does not happen through this function. A task that |
3951 | * needs to update the chunk btree (the only btree that uses system chunks), must | |
3952 | * preallocate chunk space by calling either check_system_chunk() or | |
3953 | * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or | |
3954 | * metadata chunk or when removing a chunk, while the latter is used before doing | |
3955 | * a modification to the chunk btree - use cases for the latter are adding, | |
3956 | * removing and resizing a device as well as relocation of a system chunk. | |
3957 | * See the comment below for more details. | |
79bd3712 FM |
3958 | * |
3959 | * The reservation of system space, done through check_system_chunk(), as well | |
3960 | * as all the updates and insertions into the chunk btree must be done while | |
3961 | * holding fs_info->chunk_mutex. This is important to guarantee that while COWing | |
3962 | * an extent buffer from the chunks btree we never trigger allocation of a new | |
3963 | * system chunk, which would result in a deadlock (trying to lock twice an | |
3964 | * extent buffer of the chunk btree, first time before triggering the chunk | |
3965 | * allocation and the second time during chunk allocation while attempting to | |
3966 | * update the chunks btree). The system chunk array is also updated while holding | |
3967 | * that mutex. The same logic applies to removing chunks - we must reserve system | |
3968 | * space, update the chunk btree and the system chunk array in the superblock | |
3969 | * while holding fs_info->chunk_mutex. | |
3970 | * | |
3971 | * This function, btrfs_chunk_alloc(), belongs to phase 1. | |
3972 | * | |
3973 | * If @force is CHUNK_ALLOC_FORCE: | |
07730d87 JB |
3974 | * - return 1 if it successfully allocates a chunk, |
3975 | * - return errors including -ENOSPC otherwise. | |
79bd3712 | 3976 | * If @force is NOT CHUNK_ALLOC_FORCE: |
07730d87 JB |
3977 | * - return 0 if it doesn't need to allocate a new chunk, |
3978 | * - return 1 if it successfully allocates a chunk, | |
3979 | * - return errors including -ENOSPC otherwise. | |
3980 | */ | |
3981 | int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, | |
3982 | enum btrfs_chunk_alloc_enum force) | |
3983 | { | |
3984 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
3985 | struct btrfs_space_info *space_info; | |
820c363b | 3986 | struct btrfs_block_group *ret_bg; |
07730d87 JB |
3987 | bool wait_for_alloc = false; |
3988 | bool should_alloc = false; | |
760e69c4 | 3989 | bool from_extent_allocation = false; |
07730d87 JB |
3990 | int ret = 0; |
3991 | ||
760e69c4 NA |
3992 | if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) { |
3993 | from_extent_allocation = true; | |
3994 | force = CHUNK_ALLOC_FORCE; | |
3995 | } | |
3996 | ||
07730d87 JB |
3997 | /* Don't re-enter if we're already allocating a chunk */ |
3998 | if (trans->allocating_chunk) | |
3999 | return -ENOSPC; | |
79bd3712 | 4000 | /* |
2bb2e00e FM |
4001 | * Allocation of system chunks can not happen through this path, as we |
4002 | * could end up in a deadlock if we are allocating a data or metadata | |
4003 | * chunk and there is another task modifying the chunk btree. | |
4004 | * | |
4005 | * This is because while we are holding the chunk mutex, we will attempt | |
4006 | * to add the new chunk item to the chunk btree or update an existing | |
4007 | * device item in the chunk btree, while the other task that is modifying | |
4008 | * the chunk btree is attempting to COW an extent buffer while holding a | |
4009 | * lock on it and on its parent - if the COW operation triggers a system | |
4010 | * chunk allocation, then we can deadlock because we are holding the | |
4011 | * chunk mutex and we may need to access that extent buffer or its parent | |
4012 | * in order to add the chunk item or update a device item. | |
4013 | * | |
4014 | * Tasks that want to modify the chunk tree should reserve system space | |
4015 | * before updating the chunk btree, by calling either | |
4016 | * btrfs_reserve_chunk_metadata() or check_system_chunk(). | |
4017 | * It's possible that after a task reserves the space, it still ends up | |
4018 | * here - this happens in the cases described above at do_chunk_alloc(). | |
4019 | * The task will have to either retry or fail. | |
79bd3712 | 4020 | */ |
2bb2e00e | 4021 | if (flags & BTRFS_BLOCK_GROUP_SYSTEM) |
79bd3712 | 4022 | return -ENOSPC; |
07730d87 JB |
4023 | |
4024 | space_info = btrfs_find_space_info(fs_info, flags); | |
4025 | ASSERT(space_info); | |
4026 | ||
4027 | do { | |
4028 | spin_lock(&space_info->lock); | |
4029 | if (force < space_info->force_alloc) | |
4030 | force = space_info->force_alloc; | |
4031 | should_alloc = should_alloc_chunk(fs_info, space_info, force); | |
4032 | if (space_info->full) { | |
4033 | /* No more free physical space */ | |
4034 | if (should_alloc) | |
4035 | ret = -ENOSPC; | |
4036 | else | |
4037 | ret = 0; | |
4038 | spin_unlock(&space_info->lock); | |
4039 | return ret; | |
4040 | } else if (!should_alloc) { | |
4041 | spin_unlock(&space_info->lock); | |
4042 | return 0; | |
4043 | } else if (space_info->chunk_alloc) { | |
4044 | /* | |
4045 | * Someone is already allocating, so we need to block | |
4046 | * until this someone is finished and then loop to | |
4047 | * recheck if we should continue with our allocation | |
4048 | * attempt. | |
4049 | */ | |
4050 | wait_for_alloc = true; | |
1314ca78 | 4051 | force = CHUNK_ALLOC_NO_FORCE; |
07730d87 JB |
4052 | spin_unlock(&space_info->lock); |
4053 | mutex_lock(&fs_info->chunk_mutex); | |
4054 | mutex_unlock(&fs_info->chunk_mutex); | |
4055 | } else { | |
4056 | /* Proceed with allocation */ | |
4057 | space_info->chunk_alloc = 1; | |
4058 | wait_for_alloc = false; | |
4059 | spin_unlock(&space_info->lock); | |
4060 | } | |
4061 | ||
4062 | cond_resched(); | |
4063 | } while (wait_for_alloc); | |
4064 | ||
4065 | mutex_lock(&fs_info->chunk_mutex); | |
4066 | trans->allocating_chunk = true; | |
4067 | ||
4068 | /* | |
4069 | * If we have mixed data/metadata chunks we want to make sure we keep | |
4070 | * allocating mixed chunks instead of individual chunks. | |
4071 | */ | |
4072 | if (btrfs_mixed_space_info(space_info)) | |
4073 | flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); | |
4074 | ||
4075 | /* | |
4076 | * If we're allocating a data chunk, go ahead and make sure that | |
4077 | * we keep a reasonable number of metadata chunks allocated in the | |
4078 | * FS as well. | |
4079 | */ | |
4080 | if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { | |
4081 | fs_info->data_chunk_allocations++; | |
4082 | if (!(fs_info->data_chunk_allocations % | |
4083 | fs_info->metadata_ratio)) | |
4084 | force_metadata_allocation(fs_info); | |
4085 | } | |
4086 | ||
820c363b | 4087 | ret_bg = do_chunk_alloc(trans, flags); |
07730d87 JB |
4088 | trans->allocating_chunk = false; |
4089 | ||
760e69c4 | 4090 | if (IS_ERR(ret_bg)) { |
820c363b | 4091 | ret = PTR_ERR(ret_bg); |
5a7d107e | 4092 | } else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) { |
760e69c4 NA |
4093 | /* |
4094 | * The new block group is likely to be used soon, so try to | |
4095 | * activate its zone now. Failure is OK for now. | |
4096 | */ | |
4097 | btrfs_zone_activate(ret_bg); | |
4098 | } | |
4099 | ||
4100 | if (!ret) | |
820c363b NA |
4101 | btrfs_put_block_group(ret_bg); |
4102 | ||
07730d87 JB |
4103 | spin_lock(&space_info->lock); |
4104 | if (ret < 0) { | |
4105 | if (ret == -ENOSPC) | |
4106 | space_info->full = 1; | |
4107 | else | |
4108 | goto out; | |
4109 | } else { | |
4110 | ret = 1; | |
4111 | space_info->max_extent_size = 0; | |
4112 | } | |
4113 | ||
4114 | space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; | |
4115 | out: | |
4116 | space_info->chunk_alloc = 0; | |
4117 | spin_unlock(&space_info->lock); | |
4118 | mutex_unlock(&fs_info->chunk_mutex); | |
07730d87 JB |
4119 | |
4120 | return ret; | |
4121 | } | |
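
/*
 * Hedged illustration, not part of the original file: how a caller such as
 * the extent allocator might force a data chunk allocation. The helper name
 * below is made up; the btrfs_chunk_alloc() call mirrors the pattern the
 * surrounding code expects.
 */
static inline int example_force_data_chunk(struct btrfs_trans_handle *trans)
{
	int ret;

	/* Returns 1 if a chunk was allocated and 0 if none was needed. */
	ret = btrfs_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA,
				CHUNK_ALLOC_FORCE_FOR_EXTENT);
	if (ret < 0 && ret != -ENOSPC)
		return ret;	/* a real caller would abort the transaction */
	return 0;
}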
4122 | ||
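/*
 * Return how many devices a chunk of the given profile type can span: the
 * profile's devs_max, or all writable devices when devs_max is 0 (striped
 * profiles such as RAID0 have no fixed upper limit).
 */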
4123 | static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) | |
4124 | { | |
4125 | u64 num_dev; | |
4126 | ||
4127 | num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; | |
4128 | if (!num_dev) | |
4129 | num_dev = fs_info->fs_devices->rw_devices; | |
4130 | ||
4131 | return num_dev; | |
4132 | } | |
4133 | ||
2bb2e00e FM |
4134 | static void reserve_chunk_space(struct btrfs_trans_handle *trans, |
4135 | u64 bytes, | |
4136 | u64 type) | |
07730d87 JB |
4137 | { |
4138 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
4139 | struct btrfs_space_info *info; | |
4140 | u64 left; | |
07730d87 | 4141 | int ret = 0; |
07730d87 JB |
4142 | |
4143 | /* | |
4144 | * Needed because we can end up allocating a system chunk, and the space | |
4145 | * reservation in the chunk block reserve must be atomic and race free. | |
4146 | */ | |
4147 | lockdep_assert_held(&fs_info->chunk_mutex); | |
4148 | ||
4149 | info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); | |
4150 | spin_lock(&info->lock); | |
4151 | left = info->total_bytes - btrfs_space_info_used(info, true); | |
4152 | spin_unlock(&info->lock); | |
4153 | ||
2bb2e00e | 4154 | if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { |
07730d87 | 4155 | btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", |
2bb2e00e | 4156 | left, bytes, type); |
07730d87 JB |
4157 | btrfs_dump_space_info(fs_info, info, 0, 0); |
4158 | } | |
4159 | ||
2bb2e00e | 4160 | if (left < bytes) { |
07730d87 | 4161 | u64 flags = btrfs_system_alloc_profile(fs_info); |
79bd3712 | 4162 | struct btrfs_block_group *bg; |
07730d87 JB |
4163 | |
4164 | /* | |
4165 | * Ignore failure to create the system chunk. We might end up not | |
4166 | * needing it, as we might not need to COW all nodes/leaves from | |
4167 | * the paths we visit in the chunk tree (they may have already been | |
4168 | * COWed or created in the current transaction, for example). | |
4169 | */ | |
f6f39f7a | 4170 | bg = btrfs_create_chunk(trans, flags); |
79bd3712 FM |
4171 | if (IS_ERR(bg)) { |
4172 | ret = PTR_ERR(bg); | |
2bb2e00e | 4173 | } else { |
b6a98021 NA |
4174 | /* |
4175 | * We have a new chunk. On a zoned filesystem it also | |
4176 | * needs to be activated. | |
4177 | */ | |
4178 | ret = btrfs_zoned_activate_one_bg(fs_info, info, true); | |
4179 | if (ret < 0) | |
4180 | return; | |
4181 | ||
79bd3712 FM |
4182 | /* |
4183 | * If we fail to add the chunk item here, we end up | |
4184 | * trying again at phase 2 of chunk allocation, at | |
4185 | * btrfs_create_pending_block_groups(). So ignore | |
2bb2e00e FM |
4186 | * any error here. An ENOSPC here could happen, due to |
4187 | * the cases described at do_chunk_alloc() - the system | |
4188 | * block group we just created was just turned into RO | |
4189 | * mode by a scrub for example, or a running discard | |
4190 | * temporarily removed its free space entries, etc. | |
79bd3712 FM |
4191 | */ |
4192 | btrfs_chunk_alloc_add_chunk_item(trans, bg); | |
4193 | } | |
07730d87 JB |
4194 | } |
4195 | ||
4196 | if (!ret) { | |
9270501c | 4197 | ret = btrfs_block_rsv_add(fs_info, |
07730d87 | 4198 | &fs_info->chunk_block_rsv, |
2bb2e00e | 4199 | bytes, BTRFS_RESERVE_NO_FLUSH); |
1cb3db1c | 4200 | if (!ret) |
2bb2e00e | 4201 | trans->chunk_bytes_reserved += bytes; |
07730d87 JB |
4202 | } |
4203 | } | |
4204 | ||
2bb2e00e FM |
4205 | /* |
4206 | * Reserve space in the system space for allocating or removing a chunk. | |
4207 | * The caller must be holding fs_info->chunk_mutex. | |
4208 | */ | |
4209 | void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) | |
4210 | { | |
4211 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
4212 | const u64 num_devs = get_profile_num_devs(fs_info, type); | |
4213 | u64 bytes; | |
4214 | ||
4215 | /* num_devs device items to update and 1 chunk item to add or remove. */ | |
4216 | bytes = btrfs_calc_metadata_size(fs_info, num_devs) + | |
4217 | btrfs_calc_insert_metadata_size(fs_info, 1); | |
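	/*
	 * Rough worked example (assuming a 16K nodesize and the usual
	 * BTRFS_MAX_LEVEL of 8): for a profile spanning 2 devices this is
	 * 2 * 16K * 8 = 256K for the device item updates, plus twice the
	 * per-item size for the chunk item insertion, another 256K, so
	 * 512K reserved in total.
	 */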
4218 | ||
4219 | reserve_chunk_space(trans, bytes, type); | |
4220 | } | |
4221 | ||
4222 | /* | |
4223 | * Reserve space in the system space, if needed, for doing a modification to the | |
4224 | * chunk btree. | |
4225 | * | |
4226 | * @trans: A transaction handle. | |
4227 | * @is_item_insertion: Indicate if the modification is for inserting a new item | |
4228 | * in the chunk btree or if it's for the deletion or update | |
4229 | * of an existing item. | |
4230 | * | |
4231 | * This is used in a context where we need to update the chunk btree outside | |
4232 | * block group allocation and removal, to avoid a deadlock with a concurrent | |
4233 | * task that is allocating a metadata or data block group and therefore needs to | |
4234 | * update the chunk btree while holding the chunk mutex. After the update to the | |
4235 | * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called. | |
4237 | */ | |
4238 | void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans, | |
4239 | bool is_item_insertion) | |
4240 | { | |
4241 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
4242 | u64 bytes; | |
4243 | ||
4244 | if (is_item_insertion) | |
4245 | bytes = btrfs_calc_insert_metadata_size(fs_info, 1); | |
4246 | else | |
4247 | bytes = btrfs_calc_metadata_size(fs_info, 1); | |
4248 | ||
4249 | mutex_lock(&fs_info->chunk_mutex); | |
4250 | reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM); | |
4251 | mutex_unlock(&fs_info->chunk_mutex); | |
4252 | } | |
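
/*
 * Hedged usage sketch: a task that updates, say, a device item outside of
 * chunk allocation or removal would wrap the chunk btree modification like
 * this (update_one_device_item() is a hypothetical helper):
 *
 *	btrfs_reserve_chunk_metadata(trans, false);
 *	ret = update_one_device_item(trans, device);
 *	btrfs_trans_release_chunk_metadata(trans);
 *
 * btrfs_trans_release_chunk_metadata() gives back the space reserved here.
 */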
4253 | ||
3e43c279 JB |
4254 | void btrfs_put_block_group_cache(struct btrfs_fs_info *info) |
4255 | { | |
32da5386 | 4256 | struct btrfs_block_group *block_group; |
3e43c279 | 4257 | |
50c31eaa JB |
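	/*
	 * Both btrfs_lookup_first_block_group() and btrfs_next_block_group()
	 * return a block group with a reference held, and the latter drops
	 * the reference on the group it was given, so this loop does not
	 * leak references.
	 */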
4258 | block_group = btrfs_lookup_first_block_group(info, 0); |
4259 | while (block_group) { | |
4260 | btrfs_wait_block_group_cache_done(block_group); | |
4261 | spin_lock(&block_group->lock); | |
4262 | if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, | |
4263 | &block_group->runtime_flags)) { | |
4264 | struct inode *inode = block_group->inode; | |
4265 | ||
4266 | block_group->inode = NULL; | |
3e43c279 | 4267 | spin_unlock(&block_group->lock); |
3e43c279 | 4268 | |
50c31eaa JB |
4269 | ASSERT(block_group->io_ctl.inode == NULL); |
4270 | iput(inode); | |
4271 | } else { | |
4272 | spin_unlock(&block_group->lock); | |
4273 | } | |
4274 | block_group = btrfs_next_block_group(block_group); | |
3e43c279 JB |
4275 | } |
4276 | } | |
4277 | ||
4278 | /* | |
4279 | * Must be called only after stopping all workers, since we could have block | |
4280 | * group caching kthreads running, and therefore they could race with us if we | |
4281 | * freed the block groups before stopping them. | |
4282 | */ | |
4283 | int btrfs_free_block_groups(struct btrfs_fs_info *info) | |
4284 | { | |
32da5386 | 4285 | struct btrfs_block_group *block_group; |
3e43c279 JB |
4286 | struct btrfs_space_info *space_info; |
4287 | struct btrfs_caching_control *caching_ctl; | |
4288 | struct rb_node *n; | |
4289 | ||
13bb483d NA |
4290 | if (btrfs_is_zoned(info)) { |
4291 | if (info->active_meta_bg) { | |
4292 | btrfs_put_block_group(info->active_meta_bg); | |
4293 | info->active_meta_bg = NULL; | |
4294 | } | |
4295 | if (info->active_system_bg) { | |
4296 | btrfs_put_block_group(info->active_system_bg); | |
4297 | info->active_system_bg = NULL; | |
4298 | } | |
4299 | } | |
4300 | ||
16b0c258 | 4301 | write_lock(&info->block_group_cache_lock); |
3e43c279 JB |
4302 | while (!list_empty(&info->caching_block_groups)) { |
4303 | caching_ctl = list_entry(info->caching_block_groups.next, | |
4304 | struct btrfs_caching_control, list); | |
4305 | list_del(&caching_ctl->list); | |
4306 | btrfs_put_caching_control(caching_ctl); | |
4307 | } | |
16b0c258 | 4308 | write_unlock(&info->block_group_cache_lock); |
3e43c279 JB |
4309 | |
4310 | spin_lock(&info->unused_bgs_lock); | |
4311 | while (!list_empty(&info->unused_bgs)) { | |
4312 | block_group = list_first_entry(&info->unused_bgs, | |
32da5386 | 4313 | struct btrfs_block_group, |
3e43c279 JB |
4314 | bg_list); |
4315 | list_del_init(&block_group->bg_list); | |
4316 | btrfs_put_block_group(block_group); | |
4317 | } | |
3e43c279 | 4318 | |
18bb8bbf JT |
4319 | while (!list_empty(&info->reclaim_bgs)) { |
4320 | block_group = list_first_entry(&info->reclaim_bgs, | |
4321 | struct btrfs_block_group, | |
4322 | bg_list); | |
4323 | list_del_init(&block_group->bg_list); | |
4324 | btrfs_put_block_group(block_group); | |
4325 | } | |
4326 | spin_unlock(&info->unused_bgs_lock); | |
4327 | ||
afba2bc0 NA |
4328 | spin_lock(&info->zone_active_bgs_lock); |
4329 | while (!list_empty(&info->zone_active_bgs)) { | |
4330 | block_group = list_first_entry(&info->zone_active_bgs, | |
4331 | struct btrfs_block_group, | |
4332 | active_bg_list); | |
4333 | list_del_init(&block_group->active_bg_list); | |
4334 | btrfs_put_block_group(block_group); | |
4335 | } | |
4336 | spin_unlock(&info->zone_active_bgs_lock); | |
4337 | ||
16b0c258 | 4338 | write_lock(&info->block_group_cache_lock); |
08dddb29 | 4339 | while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) { |
32da5386 | 4340 | block_group = rb_entry(n, struct btrfs_block_group, |
3e43c279 | 4341 | cache_node); |
08dddb29 FM |
4342 | rb_erase_cached(&block_group->cache_node, |
4343 | &info->block_group_cache_tree); | |
3e43c279 | 4344 | RB_CLEAR_NODE(&block_group->cache_node); |
16b0c258 | 4345 | write_unlock(&info->block_group_cache_lock); |
3e43c279 JB |
4346 | |
4347 | down_write(&block_group->space_info->groups_sem); | |
4348 | list_del(&block_group->list); | |
4349 | up_write(&block_group->space_info->groups_sem); | |
4350 | ||
4351 | /* | |
4352 | * We haven't cached this block group, which means we may have | |
4353 | * excluded extents left on it that need to be freed. | |
4354 | */ | |
4355 | if (block_group->cached == BTRFS_CACHE_NO || | |
4356 | block_group->cached == BTRFS_CACHE_ERROR) | |
4357 | btrfs_free_excluded_extents(block_group); | |
4358 | ||
4359 | btrfs_remove_free_space_cache(block_group); | |
4360 | ASSERT(block_group->cached != BTRFS_CACHE_STARTED); | |
4361 | ASSERT(list_empty(&block_group->dirty_list)); | |
4362 | ASSERT(list_empty(&block_group->io_list)); | |
4363 | ASSERT(list_empty(&block_group->bg_list)); | |
48aaeebe | 4364 | ASSERT(refcount_read(&block_group->refs) == 1); |
195a49ea | 4365 | ASSERT(block_group->swap_extents == 0); |
3e43c279 JB |
4366 | btrfs_put_block_group(block_group); |
4367 | ||
16b0c258 | 4368 | write_lock(&info->block_group_cache_lock); |
3e43c279 | 4369 | } |
16b0c258 | 4370 | write_unlock(&info->block_group_cache_lock); |
3e43c279 | 4371 | |
3e43c279 JB |
4372 | btrfs_release_global_block_rsv(info); |
4373 | ||
4374 | while (!list_empty(&info->space_info)) { | |
4375 | space_info = list_entry(info->space_info.next, | |
4376 | struct btrfs_space_info, | |
4377 | list); | |
4378 | ||
4379 | /* | |
4380 | * Do not hide this behind enospc_debug; this is actually | |
4381 | * important, as it indicates a real bug if it happens. | |
4382 | */ | |
4383 | if (WARN_ON(space_info->bytes_pinned > 0 || | |
3e43c279 JB |
4384 | space_info->bytes_may_use > 0)) |
4385 | btrfs_dump_space_info(info, space_info, 0, 0); | |
40cdc509 FM |
4386 | |
4387 | /* | |
4388 | * If there was a failure to clean up a log tree, very likely due | |
4389 | * to an IO failure on a writeback attempt of one or more of its | |
4390 | * extent buffers, we could not do proper (and cheap) unaccounting | |
4391 | * of their reserved space, so don't warn on bytes_reserved > 0 in | |
4392 | * that case. | |
4393 | */ | |
4394 | if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || | |
4395 | !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { | |
4396 | if (WARN_ON(space_info->bytes_reserved > 0)) | |
4397 | btrfs_dump_space_info(info, space_info, 0, 0); | |
4398 | } | |
4399 | ||
d611add4 | 4400 | WARN_ON(space_info->reclaim_size > 0); |
3e43c279 JB |
4401 | list_del(&space_info->list); |
4402 | btrfs_sysfs_remove_space_info(space_info); | |
4403 | } | |
4404 | return 0; | |
4405 | } | |
684b752b FM |
4406 | |
4407 | void btrfs_freeze_block_group(struct btrfs_block_group *cache) | |
4408 | { | |
4409 | atomic_inc(&cache->frozen); | |
4410 | } | |
4411 | ||
4412 | void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group) | |
4413 | { | |
4414 | struct btrfs_fs_info *fs_info = block_group->fs_info; | |
4415 | struct extent_map_tree *em_tree; | |
4416 | struct extent_map *em; | |
4417 | bool cleanup; | |
4418 | ||
4419 | spin_lock(&block_group->lock); | |
4420 | cleanup = (atomic_dec_and_test(&block_group->frozen) && | |
3349b57f | 4421 | test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)); |
684b752b FM |
4422 | spin_unlock(&block_group->lock); |
4423 | ||
4424 | if (cleanup) { | |
684b752b FM |
4425 | em_tree = &fs_info->mapping_tree; |
4426 | write_lock(&em_tree->lock); | |
4427 | em = lookup_extent_mapping(em_tree, block_group->start, | |
4428 | 1); | |
4429 | BUG_ON(!em); /* logic error, can't happen */ | |
4430 | remove_extent_mapping(em_tree, em); | |
4431 | write_unlock(&em_tree->lock); | |
684b752b FM |
4432 | |
4433 | /* once for us and once for the tree */ | |
4434 | free_extent_map(em); | |
4435 | free_extent_map(em); | |
4436 | ||
4437 | /* | |
4438 | * We may have left one free space entry, and other tasks | |
4439 | * trimming this block group may each have left one entry | |
4440 | * as well. Free them if any. | |
4441 | */ | |
fc80f7ac | 4442 | btrfs_remove_free_space_cache(block_group); |
684b752b FM |
4443 | } |
4444 | } | |
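
/*
 * Hedged usage sketch: callers freeze a block group to keep its extent map
 * alive while operating on it, e.g. around a trim (arguments elided):
 *
 *	btrfs_freeze_block_group(block_group);
 *	ret = btrfs_trim_block_group(block_group, ...);
 *	btrfs_unfreeze_block_group(block_group);
 *
 * If the group was removed in the meantime, the last unfreezer performs the
 * deferred extent map and free space cleanup seen above.
 */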
195a49ea FM |
4445 | |
4446 | bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg) | |
4447 | { | |
4448 | bool ret = true; | |
4449 | ||
4450 | spin_lock(&bg->lock); | |
4451 | if (bg->ro) | |
4452 | ret = false; | |
4453 | else | |
4454 | bg->swap_extents++; | |
4455 | spin_unlock(&bg->lock); | |
4456 | ||
4457 | return ret; | |
4458 | } | |
4459 | ||
4460 | void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount) | |
4461 | { | |
4462 | spin_lock(&bg->lock); | |
4463 | ASSERT(!bg->ro); | |
4464 | ASSERT(bg->swap_extents >= amount); | |
4465 | bg->swap_extents -= amount; | |
4466 | spin_unlock(&bg->lock); | |
4467 | } | |
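
/*
 * Hedged usage sketch: swapfile activation takes swap extents references
 * for the block groups backing the file, which prevents them from being
 * set read-only (and thus relocated) while the swapfile is active:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		return -EINVAL;
 *
 * A false return means the group is already read-only and cannot back a
 * swapfile; btrfs_dec_block_group_swap_extents() undoes this on swapoff.
 */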
52bb7a21 BB |
4468 | |
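/*
 * Map an allocation size onto a size class: up to 128K is small, up to 8M
 * is medium, and anything larger is large. For example, a 100K extent is
 * small, a 1M extent is medium and a 16M extent is large.
 */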
4469 | enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size) | |
4470 | { | |
4471 | if (size <= SZ_128K) | |
4472 | return BTRFS_BG_SZ_SMALL; | |
4473 | if (size <= SZ_8M) | |
4474 | return BTRFS_BG_SZ_MEDIUM; | |
4475 | return BTRFS_BG_SZ_LARGE; | |
4476 | } | |
4477 | ||
4478 | /* | |
4479 | * Handle a block group allocating an extent in a size class | |
4480 | * | |
4481 | * @bg: The block group we allocated in. | |
4482 | * @size_class: The size class of the allocation. | |
4483 | * @force_wrong_size_class: Whether we are desperate enough to allow | |
4484 | * mismatched size classes. | |
4485 | * | |
4486 | * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the | |
4487 | * case of a race that leads to the wrong size class without | |
4488 | * force_wrong_size_class set. | |
4489 | * | |
4490 | * find_free_extent will skip block groups with a mismatched size class until | |
4491 | * it really needs to avoid ENOSPC. In that case it will set | |
4492 | * force_wrong_size_class. However, if a block group is newly allocated and | |
4493 | * doesn't yet have a size class, then it is possible for two allocations of | |
4494 | * different sizes to race and both try to use it. The loser is caught here and | |
4495 | * has to retry. | |
4496 | */ | |
4497 | int btrfs_use_block_group_size_class(struct btrfs_block_group *bg, | |
4498 | enum btrfs_block_group_size_class size_class, | |
4499 | bool force_wrong_size_class) | |
4500 | { | |
4501 | ASSERT(size_class != BTRFS_BG_SZ_NONE); | |
4502 | ||
4503 | /* The new allocation is in the right size class, do nothing */ | |
4504 | if (bg->size_class == size_class) | |
4505 | return 0; | |
4506 | /* | |
4507 | * The new allocation is in a mismatched size class. | |
4508 | * This means one of two things: | |
4509 | * | |
4510 | * 1. Two tasks in find_free_extent for different size_classes raced | |
4511 | * and hit the same empty block_group. Make the loser try again. | |
4512 | * 2. A call to find_free_extent got desperate enough to set | |
4513 | * 'force_wrong_size_class'. Don't change the size_class, but | |
4514 | * allow the allocation. | |
4515 | */ | |
4516 | if (bg->size_class != BTRFS_BG_SZ_NONE) { | |
4517 | if (force_wrong_size_class) | |
4518 | return 0; | |
4519 | return -EAGAIN; | |
4520 | } | |
4521 | /* | |
4522 | * The happy new block group case: the new allocation is the first | |
4523 | * one in the block_group, so we set its size_class. | |
4524 | */ | |
4525 | bg->size_class = size_class; | |
4526 | ||
4527 | return 0; | |
4528 | } | |
cb0922f2 BB |
4529 | |
4530 | bool btrfs_block_group_should_use_size_class(struct btrfs_block_group *bg) | |
4531 | { | |
4532 | if (btrfs_is_zoned(bg->fs_info)) | |
4533 | return false; | |
4534 | if (!btrfs_is_block_group_data_only(bg)) | |
4535 | return false; | |
4536 | return true; | |
4537 | } |
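
/*
 * Hedged sketch of how an allocator is expected to consume the two helpers
 * above (control flow simplified):
 *
 *	if (btrfs_block_group_should_use_size_class(bg)) {
 *		ret = btrfs_use_block_group_size_class(bg, size_class,
 *						       force_wrong_size_class);
 *		if (ret == -EAGAIN)
 *			continue;
 *	}
 *
 * An -EAGAIN return means we lost a size class race on a fresh block group
 * and should try the next candidate.
 */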