// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* Pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
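
/*
 * Worked example (sketch): with two writable devices, available metadata
 * profile bits RAID1 | RAID0 and no restripe target, the loop above allows
 * both profiles and the if/else ladder keeps only the most redundant one, so
 *
 *	btrfs_reduce_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_METADATA |
 *				   BTRFS_BLOCK_GROUP_RAID1 |
 *				   BTRFS_BLOCK_GROUP_RAID0)
 *
 * reduces to BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1, converted
 * to chunk format by extended_to_chunk().
 */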

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
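
/*
 * The do/while above is the standard lockless seqlock reader idiom; in
 * general form (sketch):
 *
 *	do {
 *		seq = read_seqbegin(&seqlock);
 *		... read the protected state ...
 *	} while (read_seqretry(&seqlock, seq));
 *
 * Readers never block writers; they simply retry the whole read if a writer
 * (holding write_seqlock(&fs_info->profiles_lock)) raced with them.
 */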

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If the rbtree is not empty, someone is still holding a
		 * full_stripe_lock mutex, which can only be released by its
		 * caller, and releasing it after the block group is freed
		 * would be a use-after-free. There is no better way to
		 * resolve this here than to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
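
/*
 * The function above follows the standard kernel rbtree insertion idiom:
 * descend the rb_node pointers to the NULL slot that keeps the tree ordered
 * (here by block_group->start, with -EEXIST on a duplicate key), then
 * rb_link_node() to splice the new node in and rb_insert_color() to
 * rebalance.
 */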

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
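
/*
 * Example (sketch): the lookup helpers above combine into the usual
 * whole-filesystem iteration pattern. Each call returns a referenced group
 * and btrfs_next_block_group() drops the reference on the group it was
 * given, so the loop itself leaks nothing:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... inspect bg ...
 *	}
 */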

bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
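
/*
 * Typical usage (sketch): a writer brackets an in-place (nocow) write with
 * the counter, falling back to COW when the block group is read-only or
 * missing:
 *
 *	if (btrfs_inc_nocow_writers(fs_info, bytenr)) {
 *		... do the nocow write ...
 *		btrfs_dec_nocow_writers(fs_info, bytenr);
 *	} else {
 *		... fall back to a COW write ...
 *	}
 *
 * btrfs_wait_nocow_writers() then lets code that flips a group read-only
 * wait until all such writers have drained.
 */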

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
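
/*
 * The empty down_write()/up_write() pair above is a barrier idiom: taking
 * groups_sem for write cannot succeed until every reader holding it has
 * released it, so by the time up_write() returns, any allocator that raced
 * with the read-only flip has already incremented bg->reservations and the
 * wait_var_event() below cannot miss it.
 */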

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by btrfs_cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
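
/*
 * Worked example (sketch): caching the range [0, 100) while excluded_extents
 * marks [30, 50) adds [0, 30) inside the loop, advances start past the
 * excluded range to 50, leaves the loop when no further excluded extent is
 * found, and then adds the tail [50, 100) after the loop, returning
 * total_added == 80.
 */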

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid + fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
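
/*
 * In the scan above, free space is inferred from the gaps between allocated
 * extents: "last" tracks the end of the previous extent item, each
 * EXTENT_ITEM or METADATA_ITEM contributes the hole [last, key.objectid) via
 * add_new_free_space(), and the final call adds whatever remains up to the
 * end of the block group.
 */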

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info. The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching block
	 * groups we need to first check if we're doing a fast load here, so
	 * we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
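
/*
 * Caller pattern (sketch): an allocator typically kicks off caching and then
 * waits only for enough progress to satisfy its allocation, e.g.
 *
 *	if (!btrfs_block_group_done(cache)) {
 *		ret = btrfs_cache_block_group(cache, 0);
 *		if (!ret)
 *			btrfs_wait_block_group_cache_progress(cache, num_bytes);
 *	}
 *
 * Passing load_cache_only == 1 instead just attempts the fast on-disk space
 * cache load without queuing the full caching_thread().
 */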

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
			  BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *   in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore, and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	spin_unlock(&block_group->lock);

	mutex_unlock(&fs_info->chunk_mutex);

	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}
out:
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}
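
/*
 * Worked example (sketch): removing a RAID1 block group mirrored across two
 * devices gives map->num_stripes == 2, so num_items == 3 + 2 == 5 metadata
 * units: the free space inode's orphan, the block group item, the free space
 * item, and one device extent item per stripe.
 */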

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by the relocation
 * routine, not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}

static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can look up
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to a BUG_ON() at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	/* Drop the reference taken on the previous transaction above. */
	if (prev_trans)
		btrfs_put_transaction(prev_trans);
	if (ret)
		btrfs_dec_block_group_ro(bg);

	return ret == 0;
}
1299 | ||
e3e0520b JB |
1300 | /* |
1301 | * Process the unused_bgs list and remove any that don't have any allocated | |
1302 | * space inside of them. | |
1303 | */ | |
1304 | void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) | |
1305 | { | |
32da5386 | 1306 | struct btrfs_block_group *block_group; |
e3e0520b JB |
1307 | struct btrfs_space_info *space_info; |
1308 | struct btrfs_trans_handle *trans; | |
6e80d4f8 | 1309 | const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC); |
e3e0520b JB |
1310 | int ret = 0; |
1311 | ||
1312 | if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) | |
1313 | return; | |
1314 | ||
1315 | spin_lock(&fs_info->unused_bgs_lock); | |
1316 | while (!list_empty(&fs_info->unused_bgs)) { | |
e3e0520b JB |
1317 | int trimming; |
1318 | ||
1319 | block_group = list_first_entry(&fs_info->unused_bgs, | |
32da5386 | 1320 | struct btrfs_block_group, |
e3e0520b JB |
1321 | bg_list); |
1322 | list_del_init(&block_group->bg_list); | |
1323 | ||
1324 | space_info = block_group->space_info; | |
1325 | ||
1326 | if (ret || btrfs_mixed_space_info(space_info)) { | |
1327 | btrfs_put_block_group(block_group); | |
1328 | continue; | |
1329 | } | |
1330 | spin_unlock(&fs_info->unused_bgs_lock); | |
1331 | ||
b0643e59 DZ |
1332 | btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group); |
1333 | ||
e3e0520b JB |
1334 | mutex_lock(&fs_info->delete_unused_bgs_mutex); |
1335 | ||
1336 | /* Don't want to race with allocators so take the groups_sem */ | |
1337 | down_write(&space_info->groups_sem); | |
6e80d4f8 DZ |
1338 | |
1339 | /* | |
1340 | * Async discard moves the final block group discard to be prior | |
1341 | * to the unused_bgs code path. Therefore, if it's not fully | |
1342 | * trimmed, punt it back to the async discard lists. | |
1343 | */ | |
1344 | if (btrfs_test_opt(fs_info, DISCARD_ASYNC) && | |
1345 | !btrfs_is_free_space_trimmed(block_group)) { | |
1346 | trace_btrfs_skip_unused_block_group(block_group); | |
1347 | up_write(&space_info->groups_sem); | |
1348 | /* Requeue if we failed because of async discard */ | |
1349 | btrfs_discard_queue_work(&fs_info->discard_ctl, | |
1350 | block_group); | |
1351 | goto next; | |
1352 | } | |
1353 | ||
e3e0520b JB |
1354 | spin_lock(&block_group->lock); |
1355 | if (block_group->reserved || block_group->pinned || | |
bf38be65 | 1356 | block_group->used || block_group->ro || |
e3e0520b JB |
1357 | list_is_singular(&block_group->list)) { |
1358 | /* | |
1359 | * We want to bail if we made new allocations or have | |
1360 | * outstanding allocations in this block group. We do | |
1361 | * the ro check in case balance is currently acting on | |
1362 | * this block group. | |
1363 | */ | |
1364 | trace_btrfs_skip_unused_block_group(block_group); | |
1365 | spin_unlock(&block_group->lock); | |
1366 | up_write(&space_info->groups_sem); | |
1367 | goto next; | |
1368 | } | |
1369 | spin_unlock(&block_group->lock); | |
1370 | ||
1371 | /* We don't want to force the issue, only flip if it's ok. */ | |
e11c0406 | 1372 | ret = inc_block_group_ro(block_group, 0); |
e3e0520b JB |
1373 | up_write(&space_info->groups_sem); |
1374 | if (ret < 0) { | |
1375 | ret = 0; | |
1376 | goto next; | |
1377 | } | |
1378 | ||
1379 | /* | |
1380 | * Want to do this before we do anything else so we can recover | |
1381 | * properly if we fail to join the transaction. | |
1382 | */ | |
1383 | trans = btrfs_start_trans_remove_block_group(fs_info, | |
b3470b5d | 1384 | block_group->start); |
e3e0520b JB |
1385 | if (IS_ERR(trans)) { |
1386 | btrfs_dec_block_group_ro(block_group); | |
1387 | ret = PTR_ERR(trans); | |
1388 | goto next; | |
1389 | } | |
1390 | ||
1391 | /* | |
1392 | * We could have pending pinned extents for this block group, | |
1393 | * just delete them, we don't care about them anymore. | |
1394 | */ | |
fe119a6e | 1395 | if (!clean_pinned_extents(trans, block_group)) |
e3e0520b | 1396 | goto end_trans; |
e3e0520b | 1397 | |
b0643e59 DZ |
1398 | /* |
1399 | * At this point, the block_group is read only and should fail | |
1400 | * new allocations. However, btrfs_finish_extent_commit() can | |
1401 | * cause this block_group to be placed back on the discard | |
1402 | * lists because now the block_group isn't fully discarded. | |
1403 | * Bail here and try again later after discarding everything. | |
1404 | */ | |
1405 | spin_lock(&fs_info->discard_ctl.lock); | |
1406 | if (!list_empty(&block_group->discard_list)) { | |
1407 | spin_unlock(&fs_info->discard_ctl.lock); | |
1408 | btrfs_dec_block_group_ro(block_group); | |
1409 | btrfs_discard_queue_work(&fs_info->discard_ctl, | |
1410 | block_group); | |
1411 | goto end_trans; | |
1412 | } | |
1413 | spin_unlock(&fs_info->discard_ctl.lock); | |
1414 | ||
e3e0520b JB |
1415 | /* Reset pinned so btrfs_put_block_group doesn't complain */ |
1416 | spin_lock(&space_info->lock); | |
1417 | spin_lock(&block_group->lock); | |
1418 | ||
1419 | btrfs_space_info_update_bytes_pinned(fs_info, space_info, | |
1420 | -block_group->pinned); | |
1421 | space_info->bytes_readonly += block_group->pinned; | |
1422 | percpu_counter_add_batch(&space_info->total_bytes_pinned, | |
1423 | -block_group->pinned, | |
1424 | BTRFS_TOTAL_BYTES_PINNED_BATCH); | |
1425 | block_group->pinned = 0; | |
1426 | ||
1427 | spin_unlock(&block_group->lock); | |
1428 | spin_unlock(&space_info->lock); | |
1429 | ||
6e80d4f8 DZ |
1430 | /* |
1431 | * The normal path here is an unused block group is passed here, | |
1432 | * then trimming is handled in the transaction commit path. | |
1433 | * Async discard interposes before this to do the trimming | |
1434 | * before coming down the unused block group path as trimming | |
1435 | * will no longer be done later in the transaction commit path. | |
1436 | */ | |
1437 | if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC)) | |
1438 | goto flip_async; | |
1439 | ||
e3e0520b | 1440 | /* DISCARD can flip during remount */ |
46b27f50 | 1441 | trimming = btrfs_test_opt(fs_info, DISCARD_SYNC); |
e3e0520b JB |
1442 | |
1443 | /* Implicit trim during transaction commit. */ | |
1444 | if (trimming) | |
1445 | btrfs_get_block_group_trimming(block_group); | |
1446 | ||
1447 | /* | |
1448 | * Btrfs_remove_chunk will abort the transaction if things go | |
1449 | * horribly wrong. | |
1450 | */ | |
b3470b5d | 1451 | ret = btrfs_remove_chunk(trans, block_group->start); |
e3e0520b JB |
1452 | |
1453 | if (ret) { | |
1454 | if (trimming) | |
1455 | btrfs_put_block_group_trimming(block_group); | |
1456 | goto end_trans; | |
1457 | } | |
1458 | ||
1459 | /* | |
1460 | * If we're not mounted with -odiscard, we can just forget | |
1461 | * about this block group. Otherwise we'll need to wait | |
1462 | * until transaction commit to do the actual discard. | |
1463 | */ | |
1464 | if (trimming) { | |
1465 | spin_lock(&fs_info->unused_bgs_lock); | |
1466 | /* | |
1467 | * A concurrent scrub might have added us to the list | |
1468 | * fs_info->unused_bgs, so use a list_move operation | |
1469 | * to add the block group to the deleted_bgs list. | |
1470 | */ | |
1471 | list_move(&block_group->bg_list, | |
1472 | &trans->transaction->deleted_bgs); | |
1473 | spin_unlock(&fs_info->unused_bgs_lock); | |
1474 | btrfs_get_block_group(block_group); | |
1475 | } | |
1476 | end_trans: | |
1477 | btrfs_end_transaction(trans); | |
1478 | next: | |
1479 | mutex_unlock(&fs_info->delete_unused_bgs_mutex); | |
1480 | btrfs_put_block_group(block_group); | |
1481 | spin_lock(&fs_info->unused_bgs_lock); | |
1482 | } | |
1483 | spin_unlock(&fs_info->unused_bgs_lock); | |
6e80d4f8 DZ |
1484 | return; |
1485 | ||
1486 | flip_async: | |
1487 | btrfs_end_transaction(trans); | |
1488 | mutex_unlock(&fs_info->delete_unused_bgs_mutex); | |
1489 | btrfs_put_block_group(block_group); | |
1490 | btrfs_discard_punt_unused_bgs_list(fs_info); | |
e3e0520b JB |
1491 | } |
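 | /* | 
 | * Note on the flip_async path: if DISCARD_ASYNC gets enabled by a | 
 | * remount after async_trim_enabled was sampled, the loop above bails | 
 | * out, ends its transaction and hands every remaining unused block | 
 | * group to the async discard lists via | 
 | * btrfs_discard_punt_unused_bgs_list(), so the discard workers pick | 
 | * them up instead of this path. | 
 | */ | 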
1492 | ||
32da5386 | 1493 | void btrfs_mark_bg_unused(struct btrfs_block_group *bg) |
e3e0520b JB |
1494 | { |
1495 | struct btrfs_fs_info *fs_info = bg->fs_info; | |
1496 | ||
1497 | spin_lock(&fs_info->unused_bgs_lock); | |
1498 | if (list_empty(&bg->bg_list)) { | |
1499 | btrfs_get_block_group(bg); | |
1500 | trace_btrfs_add_unused_block_group(bg); | |
1501 | list_add_tail(&bg->bg_list, &fs_info->unused_bgs); | |
1502 | } | |
1503 | spin_unlock(&fs_info->unused_bgs_lock); | |
1504 | } | |
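 | /* | 
 | * The bg_list insertion above pins the group with its own reference | 
 | * (btrfs_get_block_group()); the unused-bg deletion loop drops that | 
 | * reference with btrfs_put_block_group() after taking the group off | 
 | * the list. | 
 | */ | 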
4358d963 JB |
1505 | |
1506 | static int find_first_block_group(struct btrfs_fs_info *fs_info, | |
1507 | struct btrfs_path *path, | |
1508 | struct btrfs_key *key) | |
1509 | { | |
1510 | struct btrfs_root *root = fs_info->extent_root; | |
1511 | int ret = 0; | |
1512 | struct btrfs_key found_key; | |
1513 | struct extent_buffer *leaf; | |
1514 | struct btrfs_block_group_item bg; | |
1515 | u64 flags; | |
1516 | int slot; | |
1517 | ||
1518 | ret = btrfs_search_slot(NULL, root, key, path, 0, 0); | |
1519 | if (ret < 0) | |
1520 | goto out; | |
1521 | ||
1522 | while (1) { | |
1523 | slot = path->slots[0]; | |
1524 | leaf = path->nodes[0]; | |
1525 | if (slot >= btrfs_header_nritems(leaf)) { | |
1526 | ret = btrfs_next_leaf(root, path); | |
1527 | if (ret == 0) | |
1528 | continue; | |
1529 | if (ret < 0) | |
1530 | goto out; | |
1531 | break; | |
1532 | } | |
1533 | btrfs_item_key_to_cpu(leaf, &found_key, slot); | |
1534 | ||
1535 | if (found_key.objectid >= key->objectid && | |
1536 | found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) { | |
1537 | struct extent_map_tree *em_tree; | |
1538 | struct extent_map *em; | |
1539 | ||
1540 | em_tree = &root->fs_info->mapping_tree; | |
1541 | read_lock(&em_tree->lock); | |
1542 | em = lookup_extent_mapping(em_tree, found_key.objectid, | |
1543 | found_key.offset); | |
1544 | read_unlock(&em_tree->lock); | |
1545 | if (!em) { | |
1546 | btrfs_err(fs_info, | |
1547 | "logical %llu len %llu found bg but no related chunk", | |
1548 | found_key.objectid, found_key.offset); | |
1549 | ret = -ENOENT; | |
1550 | } else if (em->start != found_key.objectid || | |
1551 | em->len != found_key.offset) { | |
1552 | btrfs_err(fs_info, | |
1553 | "block group %llu len %llu mismatch with chunk %llu len %llu", | |
1554 | found_key.objectid, found_key.offset, | |
1555 | em->start, em->len); | |
1556 | ret = -EUCLEAN; | |
1557 | } else { | |
1558 | read_extent_buffer(leaf, &bg, | |
1559 | btrfs_item_ptr_offset(leaf, slot), | |
1560 | sizeof(bg)); | |
de0dc456 | 1561 | flags = btrfs_stack_block_group_flags(&bg) & |
4358d963 JB |
1562 | BTRFS_BLOCK_GROUP_TYPE_MASK; |
1563 | ||
1564 | if (flags != (em->map_lookup->type & | |
1565 | BTRFS_BLOCK_GROUP_TYPE_MASK)) { | |
1566 | btrfs_err(fs_info, | |
1567 | "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx", | |
1568 | found_key.objectid, | |
1569 | found_key.offset, flags, | |
1570 | (BTRFS_BLOCK_GROUP_TYPE_MASK & | |
1571 | em->map_lookup->type)); | |
1572 | ret = -EUCLEAN; | |
1573 | } else { | |
1574 | ret = 0; | |
1575 | } | |
1576 | } | |
1577 | free_extent_map(em); | |
1578 | goto out; | |
1579 | } | |
1580 | path->slots[0]++; | |
1581 | } | |
1582 | out: | |
1583 | return ret; | |
1584 | } | |
1585 | ||
1586 | static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags) | |
1587 | { | |
1588 | u64 extra_flags = chunk_to_extended(flags) & | |
1589 | BTRFS_EXTENDED_PROFILE_MASK; | |
1590 | ||
1591 | write_seqlock(&fs_info->profiles_lock); | |
1592 | if (flags & BTRFS_BLOCK_GROUP_DATA) | |
1593 | fs_info->avail_data_alloc_bits |= extra_flags; | |
1594 | if (flags & BTRFS_BLOCK_GROUP_METADATA) | |
1595 | fs_info->avail_metadata_alloc_bits |= extra_flags; | |
1596 | if (flags & BTRFS_BLOCK_GROUP_SYSTEM) | |
1597 | fs_info->avail_system_alloc_bits |= extra_flags; | |
1598 | write_sequnlock(&fs_info->profiles_lock); | |
1599 | } | |
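 | /* | 
 | * Readers of the avail_*_alloc_bits fields pair with the seqlock | 
 | * above; a minimal reader sketch (the usual seqlock pattern, as used | 
 | * by the btrfs_get_alloc_profile() helpers): | 
 | * | 
 | *	unsigned int seq; | 
 | *	u64 bits; | 
 | * | 
 | *	do { | 
 | *		seq = read_seqbegin(&fs_info->profiles_lock); | 
 | *		bits = fs_info->avail_data_alloc_bits; | 
 | *	} while (read_seqretry(&fs_info->profiles_lock, seq)); | 
 | */ | 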
1600 | ||
96a14336 NB |
1601 | /** |
1602 | * btrfs_rmap_block - Map a physical disk address to a list of logical addresses | |
 | * @fs_info: the filesystem | 
1603 | * @chunk_start: logical address of block group | 
1604 | * @physical: physical address to map to logical addresses | |
1605 | * @logical: return array of logical addresses which map to @physical | |
1606 | * @naddrs: length of @logical | |
1607 | * @stripe_len: size of IO stripe for the given block group | |
1608 | * | |
1609 | * Maps a particular @physical disk address to a list of @logical addresses. | |
1610 | * Used primarily to exclude those portions of a block group that contain super | |
1611 | * block copies. | |
1612 | */ | |
1613 | EXPORT_FOR_TESTS | |
1614 | int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start, | |
1615 | u64 physical, u64 **logical, int *naddrs, int *stripe_len) | |
1616 | { | |
1617 | struct extent_map *em; | |
1618 | struct map_lookup *map; | |
1619 | u64 *buf; | |
1620 | u64 bytenr; | |
1776ad17 NB |
1621 | u64 data_stripe_length; |
1622 | u64 io_stripe_size; | |
1623 | int i, nr = 0; | |
1624 | int ret = 0; | |
96a14336 NB |
1625 | |
1626 | em = btrfs_get_chunk_map(fs_info, chunk_start, 1); | |
1627 | if (IS_ERR(em)) | |
1628 | return -EIO; | |
1629 | ||
1630 | map = em->map_lookup; | |
1776ad17 NB |
1631 | data_stripe_length = em->len; |
1632 | io_stripe_size = map->stripe_len; | |
96a14336 NB |
1633 | |
1634 | if (map->type & BTRFS_BLOCK_GROUP_RAID10) | |
1776ad17 NB |
1635 | data_stripe_length = div_u64(data_stripe_length, |
1636 | map->num_stripes / map->sub_stripes); | |
96a14336 | 1637 | else if (map->type & BTRFS_BLOCK_GROUP_RAID0) |
1776ad17 | 1638 | data_stripe_length = div_u64(data_stripe_length, map->num_stripes); |
96a14336 | 1639 | else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { |
1776ad17 NB |
1640 | data_stripe_length = div_u64(data_stripe_length, |
1641 | nr_data_stripes(map)); | |
1642 | io_stripe_size = map->stripe_len * nr_data_stripes(map); | |
96a14336 NB |
1643 | } |
1644 | ||
1645 | buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); | |
1776ad17 NB |
1646 | if (!buf) { |
1647 | ret = -ENOMEM; | |
1648 | goto out; | |
1649 | } | |
96a14336 NB |
1650 | |
1651 | for (i = 0; i < map->num_stripes; i++) { | |
1776ad17 NB |
1652 | bool already_inserted = false; |
1653 | u64 stripe_nr; | |
1654 | int j; | |
1655 | ||
1656 | if (!in_range(physical, map->stripes[i].physical, | |
1657 | data_stripe_length)) | |
96a14336 NB |
1658 | continue; |
1659 | ||
1660 | stripe_nr = physical - map->stripes[i].physical; | |
1661 | stripe_nr = div64_u64(stripe_nr, map->stripe_len); | |
1662 | ||
1663 | if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | |
1664 | stripe_nr = stripe_nr * map->num_stripes + i; | |
1665 | stripe_nr = div_u64(stripe_nr, map->sub_stripes); | |
1666 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | |
1667 | stripe_nr = stripe_nr * map->num_stripes + i; | |
1668 | } | |
1669 | /* | |
1670 | * The remaining case would be for RAID56, multiply by | |
1671 | * nr_data_stripes(). Alternatively, just use io_stripe_size below | 
1672 | * instead of map->stripe_len. | 
1673 | */ | |
1674 | ||
1776ad17 NB |
1675 | bytenr = chunk_start + stripe_nr * io_stripe_size; |
1676 | ||
1677 | /* Ensure we don't add duplicate addresses */ | |
96a14336 | 1678 | for (j = 0; j < nr; j++) { |
1776ad17 NB |
1679 | if (buf[j] == bytenr) { |
1680 | already_inserted = true; | |
96a14336 | 1681 | break; |
1776ad17 | 1682 | } |
96a14336 | 1683 | } |
1776ad17 NB |
1684 | |
1685 | if (!already_inserted) | |
96a14336 | 1686 | buf[nr++] = bytenr; |
96a14336 NB |
1687 | } |
1688 | ||
1689 | *logical = buf; | |
1690 | *naddrs = nr; | |
1776ad17 NB |
1691 | *stripe_len = io_stripe_size; |
1692 | out: | |
96a14336 | 1693 | free_extent_map(em); |
1776ad17 | 1694 | return ret; |
96a14336 NB |
1695 | } |
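 | /* | 
 | * Worked example for the stripe math above, with illustrative | 
 | * numbers: take a RAID0 chunk with num_stripes = 2 and | 
 | * stripe_len = 64K, and a @physical that sits 192K into device | 
 | * stripe i = 1: | 
 | * | 
 | *	stripe_nr = 192K / 64K = 3 | 
 | *	stripe_nr = 3 * 2 + 1  = 7 | 
 | *	bytenr    = chunk_start + 7 * 64K | 
 | * | 
 | * i.e. that physical address backs the eighth 64K IO stripe of the | 
 | * chunk. | 
 | */ | 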
1696 | ||
32da5386 | 1697 | static int exclude_super_stripes(struct btrfs_block_group *cache) |
4358d963 JB |
1698 | { |
1699 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
1700 | u64 bytenr; | |
1701 | u64 *logical; | |
1702 | int stripe_len; | |
1703 | int i, nr, ret; | |
1704 | ||
b3470b5d DS |
1705 | if (cache->start < BTRFS_SUPER_INFO_OFFSET) { |
1706 | stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start; | |
4358d963 | 1707 | cache->bytes_super += stripe_len; |
b3470b5d | 1708 | ret = btrfs_add_excluded_extent(fs_info, cache->start, |
4358d963 JB |
1709 | stripe_len); |
1710 | if (ret) | |
1711 | return ret; | |
1712 | } | |
1713 | ||
1714 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | |
1715 | bytenr = btrfs_sb_offset(i); | |
b3470b5d | 1716 | ret = btrfs_rmap_block(fs_info, cache->start, |
4358d963 JB |
1717 | bytenr, &logical, &nr, &stripe_len); |
1718 | if (ret) | |
1719 | return ret; | |
1720 | ||
1721 | while (nr--) { | |
1722 | u64 start, len; | |
1723 | ||
b3470b5d | 1724 | if (logical[nr] > cache->start + cache->length) |
4358d963 JB |
1725 | continue; |
1726 | ||
b3470b5d | 1727 | if (logical[nr] + stripe_len <= cache->start) |
4358d963 JB |
1728 | continue; |
1729 | ||
1730 | start = logical[nr]; | |
b3470b5d DS |
1731 | if (start < cache->start) { |
1732 | start = cache->start; | |
4358d963 JB |
1733 | len = (logical[nr] + stripe_len) - start; |
1734 | } else { | |
1735 | len = min_t(u64, stripe_len, | |
b3470b5d | 1736 | cache->start + cache->length - start); |
4358d963 JB |
1737 | } |
1738 | ||
1739 | cache->bytes_super += len; | |
1740 | ret = btrfs_add_excluded_extent(fs_info, start, len); | |
1741 | if (ret) { | |
1742 | kfree(logical); | |
1743 | return ret; | |
1744 | } | |
1745 | } | |
1746 | ||
1747 | kfree(logical); | |
1748 | } | |
1749 | return 0; | |
1750 | } | |
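 | /* | 
 | * For reference: btrfs_sb_offset() places the primary super block at | 
 | * 64K and (with the usual BTRFS_SUPER_MIRROR_SHIFT of 12) mirrors at | 
 | * 64M and 256G, so only the few block groups overlapping those | 
 | * offsets ever accumulate bytes_super here. | 
 | */ | 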
1751 | ||
32da5386 | 1752 | static void link_block_group(struct btrfs_block_group *cache) |
4358d963 JB |
1753 | { |
1754 | struct btrfs_space_info *space_info = cache->space_info; | |
1755 | int index = btrfs_bg_flags_to_raid_index(cache->flags); | |
1756 | bool first = false; | |
1757 | ||
1758 | down_write(&space_info->groups_sem); | |
1759 | if (list_empty(&space_info->block_groups[index])) | |
1760 | first = true; | |
1761 | list_add_tail(&cache->list, &space_info->block_groups[index]); | |
1762 | up_write(&space_info->groups_sem); | |
1763 | ||
1764 | if (first) | |
1765 | btrfs_sysfs_add_block_group_type(cache); | |
1766 | } | |
1767 | ||
32da5386 | 1768 | static struct btrfs_block_group *btrfs_create_block_group_cache( |
4358d963 JB |
1769 | struct btrfs_fs_info *fs_info, u64 start, u64 size) |
1770 | { | |
32da5386 | 1771 | struct btrfs_block_group *cache; |
4358d963 JB |
1772 | |
1773 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | |
1774 | if (!cache) | |
1775 | return NULL; | |
1776 | ||
1777 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | |
1778 | GFP_NOFS); | |
1779 | if (!cache->free_space_ctl) { | |
1780 | kfree(cache); | |
1781 | return NULL; | |
1782 | } | |
1783 | ||
b3470b5d DS |
1784 | cache->start = start; |
1785 | cache->length = size; | |
4358d963 JB |
1786 | |
1787 | cache->fs_info = fs_info; | |
1788 | cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start); | |
1789 | set_free_space_tree_thresholds(cache); | |
1790 | ||
6e80d4f8 DZ |
1791 | cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED; |
1792 | ||
4358d963 JB |
1793 | atomic_set(&cache->count, 1); |
1794 | spin_lock_init(&cache->lock); | |
1795 | init_rwsem(&cache->data_rwsem); | |
1796 | INIT_LIST_HEAD(&cache->list); | |
1797 | INIT_LIST_HEAD(&cache->cluster_list); | |
1798 | INIT_LIST_HEAD(&cache->bg_list); | |
1799 | INIT_LIST_HEAD(&cache->ro_list); | |
b0643e59 | 1800 | INIT_LIST_HEAD(&cache->discard_list); |
4358d963 JB |
1801 | INIT_LIST_HEAD(&cache->dirty_list); |
1802 | INIT_LIST_HEAD(&cache->io_list); | |
1803 | btrfs_init_free_space_ctl(cache); | |
1804 | atomic_set(&cache->trimming, 0); | |
1805 | mutex_init(&cache->free_space_lock); | |
1806 | btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root); | |
1807 | ||
1808 | return cache; | |
1809 | } | |
1810 | ||
1811 | /* | |
1812 | * Iterate all chunks and verify that each of them has the corresponding block | |
1813 | * group | |
1814 | */ | |
1815 | static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info) | |
1816 | { | |
1817 | struct extent_map_tree *map_tree = &fs_info->mapping_tree; | |
1818 | struct extent_map *em; | |
32da5386 | 1819 | struct btrfs_block_group *bg; |
4358d963 JB |
1820 | u64 start = 0; |
1821 | int ret = 0; | |
1822 | ||
1823 | while (1) { | |
1824 | read_lock(&map_tree->lock); | |
1825 | /* | |
1826 | * lookup_extent_mapping will return the first extent map | |
1827 | * intersecting the range, so setting @len to 1 is enough to | |
1828 | * get the first chunk. | |
1829 | */ | |
1830 | em = lookup_extent_mapping(map_tree, start, 1); | |
1831 | read_unlock(&map_tree->lock); | |
1832 | if (!em) | |
1833 | break; | |
1834 | ||
1835 | bg = btrfs_lookup_block_group(fs_info, em->start); | |
1836 | if (!bg) { | |
1837 | btrfs_err(fs_info, | |
1838 | "chunk start=%llu len=%llu doesn't have corresponding block group", | |
1839 | em->start, em->len); | |
1840 | ret = -EUCLEAN; | |
1841 | free_extent_map(em); | |
1842 | break; | |
1843 | } | |
b3470b5d | 1844 | if (bg->start != em->start || bg->length != em->len || |
4358d963 JB |
1845 | (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) != |
1846 | (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) { | |
1847 | btrfs_err(fs_info, | |
1848 | "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx", | |
1849 | em->start, em->len, | |
1850 | em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK, | |
b3470b5d | 1851 | bg->start, bg->length, |
4358d963 JB |
1852 | bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK); |
1853 | ret = -EUCLEAN; | |
1854 | free_extent_map(em); | |
1855 | btrfs_put_block_group(bg); | |
1856 | break; | |
1857 | } | |
1858 | start = em->start + em->len; | |
1859 | free_extent_map(em); | |
1860 | btrfs_put_block_group(bg); | |
1861 | } | |
1862 | return ret; | |
1863 | } | |
1864 | ||
ffb9e0f0 QW |
1865 | static int read_one_block_group(struct btrfs_fs_info *info, |
1866 | struct btrfs_path *path, | |
d49a2ddb | 1867 | const struct btrfs_key *key, |
ffb9e0f0 QW |
1868 | int need_clear) |
1869 | { | |
1870 | struct extent_buffer *leaf = path->nodes[0]; | |
32da5386 | 1871 | struct btrfs_block_group *cache; |
ffb9e0f0 | 1872 | struct btrfs_space_info *space_info; |
ffb9e0f0 QW |
1873 | struct btrfs_block_group_item bgi; |
1874 | const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS); | |
1875 | int slot = path->slots[0]; | |
1876 | int ret; | |
1877 | ||
d49a2ddb | 1878 | ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY); |
ffb9e0f0 | 1879 | |
d49a2ddb | 1880 | cache = btrfs_create_block_group_cache(info, key->objectid, key->offset); |
ffb9e0f0 QW |
1881 | if (!cache) |
1882 | return -ENOMEM; | |
1883 | ||
1884 | if (need_clear) { | |
1885 | /* | |
1886 | * When we mount with an old space cache, we need to | 
1887 | * set BTRFS_DC_CLEAR and set the dirty flag. | 
1888 | * | |
1889 | * a) Setting 'BTRFS_DC_CLEAR' makes sure that we | |
1890 | * truncate the old free space cache inode and | |
1891 | * setup a new one. | |
1892 | * b) Setting 'dirty flag' makes sure that we flush | |
1893 | * the new space cache info onto disk. | |
1894 | */ | |
1895 | if (btrfs_test_opt(info, SPACE_CACHE)) | |
1896 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
1897 | } | |
1898 | read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot), | |
1899 | sizeof(bgi)); | |
1900 | cache->used = btrfs_stack_block_group_used(&bgi); | |
1901 | cache->flags = btrfs_stack_block_group_flags(&bgi); | |
1902 | if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) && | |
1903 | (cache->flags & BTRFS_BLOCK_GROUP_DATA))) { | |
1904 | btrfs_err(info, | |
1905 | "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups", | |
1906 | cache->start); | |
1907 | ret = -EINVAL; | |
1908 | goto error; | |
1909 | } | |
1910 | ||
1911 | /* | |
1912 | * We need to exclude the super stripes now so that the space info has | |
1913 | * super bytes accounted for, otherwise we'll think we have more space | |
1914 | * than we actually do. | |
1915 | */ | |
1916 | ret = exclude_super_stripes(cache); | |
1917 | if (ret) { | |
1918 | /* We may have excluded something, so call this just in case. */ | |
1919 | btrfs_free_excluded_extents(cache); | |
1920 | goto error; | |
1921 | } | |
1922 | ||
1923 | /* | |
1924 | * Check for two cases: either we are full, and therefore don't need | 
1925 | * to bother with the caching work since we won't find any space, or we | |
1926 | * are empty, and we can just add all the space in and be done with it. | |
1927 | * This saves us _a_lot_ of time, particularly in the full case. | |
1928 | */ | |
d49a2ddb | 1929 | if (key->offset == cache->used) { |
ffb9e0f0 QW |
1930 | cache->last_byte_to_unpin = (u64)-1; |
1931 | cache->cached = BTRFS_CACHE_FINISHED; | |
1932 | btrfs_free_excluded_extents(cache); | |
1933 | } else if (cache->used == 0) { | |
1934 | cache->last_byte_to_unpin = (u64)-1; | |
1935 | cache->cached = BTRFS_CACHE_FINISHED; | |
d49a2ddb QW |
1936 | add_new_free_space(cache, key->objectid, |
1937 | key->objectid + key->offset); | |
ffb9e0f0 QW |
1938 | btrfs_free_excluded_extents(cache); |
1939 | } | |
1940 | ||
1941 | ret = btrfs_add_block_group_cache(info, cache); | |
1942 | if (ret) { | |
1943 | btrfs_remove_free_space_cache(cache); | |
1944 | goto error; | |
1945 | } | |
1946 | trace_btrfs_add_block_group(info, cache, 0); | |
d49a2ddb | 1947 | btrfs_update_space_info(info, cache->flags, key->offset, |
ffb9e0f0 QW |
1948 | cache->used, cache->bytes_super, &space_info); |
1949 | ||
1950 | cache->space_info = space_info; | |
1951 | ||
1952 | link_block_group(cache); | |
1953 | ||
1954 | set_avail_alloc_bits(info, cache->flags); | |
1955 | if (btrfs_chunk_readonly(info, cache->start)) { | |
1956 | inc_block_group_ro(cache, 1); | |
1957 | } else if (cache->used == 0) { | |
1958 | ASSERT(list_empty(&cache->bg_list)); | |
6e80d4f8 DZ |
1959 | if (btrfs_test_opt(info, DISCARD_ASYNC)) |
1960 | btrfs_discard_queue_work(&info->discard_ctl, cache); | |
1961 | else | |
1962 | btrfs_mark_bg_unused(cache); | |
ffb9e0f0 QW |
1963 | } |
1964 | return 0; | |
1965 | error: | |
1966 | btrfs_put_block_group(cache); | |
1967 | return ret; | |
1968 | } | |
1969 | ||
4358d963 JB |
1970 | int btrfs_read_block_groups(struct btrfs_fs_info *info) |
1971 | { | |
1972 | struct btrfs_path *path; | |
1973 | int ret; | |
32da5386 | 1974 | struct btrfs_block_group *cache; |
4358d963 JB |
1975 | struct btrfs_space_info *space_info; |
1976 | struct btrfs_key key; | |
4358d963 JB |
1977 | int need_clear = 0; |
1978 | u64 cache_gen; | |
4358d963 JB |
1979 | |
1980 | key.objectid = 0; | |
1981 | key.offset = 0; | |
1982 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
1983 | path = btrfs_alloc_path(); | |
1984 | if (!path) | |
1985 | return -ENOMEM; | |
1986 | path->reada = READA_FORWARD; | |
1987 | ||
1988 | cache_gen = btrfs_super_cache_generation(info->super_copy); | |
1989 | if (btrfs_test_opt(info, SPACE_CACHE) && | |
1990 | btrfs_super_generation(info->super_copy) != cache_gen) | |
1991 | need_clear = 1; | |
1992 | if (btrfs_test_opt(info, CLEAR_CACHE)) | |
1993 | need_clear = 1; | |
1994 | ||
1995 | while (1) { | |
1996 | ret = find_first_block_group(info, path, &key); | |
1997 | if (ret > 0) | |
1998 | break; | |
1999 | if (ret != 0) | |
2000 | goto error; | |
2001 | ||
ffb9e0f0 | 2002 | btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); |
d49a2ddb | 2003 | ret = read_one_block_group(info, path, &key, need_clear); |
ffb9e0f0 | 2004 | if (ret < 0) |
4358d963 | 2005 | goto error; |
ffb9e0f0 QW |
2006 | key.objectid += key.offset; |
2007 | key.offset = 0; | |
4358d963 | 2008 | btrfs_release_path(path); |
4358d963 JB |
2009 | } |
2010 | ||
29566c9c | 2011 | rcu_read_lock(); |
4358d963 JB |
2012 | list_for_each_entry_rcu(space_info, &info->space_info, list) { |
2013 | if (!(btrfs_get_alloc_profile(info, space_info->flags) & | |
2014 | (BTRFS_BLOCK_GROUP_RAID10 | | |
2015 | BTRFS_BLOCK_GROUP_RAID1_MASK | | |
2016 | BTRFS_BLOCK_GROUP_RAID56_MASK | | |
2017 | BTRFS_BLOCK_GROUP_DUP))) | |
2018 | continue; | |
2019 | /* | |
2020 | * Avoid allocating from an un-mirrored block group if there are | 
2021 | * mirrored block groups. | |
2022 | */ | |
2023 | list_for_each_entry(cache, | |
2024 | &space_info->block_groups[BTRFS_RAID_RAID0], | |
2025 | list) | |
e11c0406 | 2026 | inc_block_group_ro(cache, 1); |
4358d963 JB |
2027 | list_for_each_entry(cache, |
2028 | &space_info->block_groups[BTRFS_RAID_SINGLE], | |
2029 | list) | |
e11c0406 | 2030 | inc_block_group_ro(cache, 1); |
4358d963 | 2031 | } |
29566c9c | 2032 | rcu_read_unlock(); |
4358d963 JB |
2033 | |
2034 | btrfs_init_global_block_rsv(info); | |
2035 | ret = check_chunk_block_group_mappings(info); | |
2036 | error: | |
2037 | btrfs_free_path(path); | |
2038 | return ret; | |
2039 | } | |
2040 | ||
2041 | void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans) | |
2042 | { | |
2043 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2044 | struct btrfs_block_group *block_group; |
4358d963 JB |
2045 | struct btrfs_root *extent_root = fs_info->extent_root; |
2046 | struct btrfs_block_group_item item; | |
2047 | struct btrfs_key key; | |
2048 | int ret = 0; | |
2049 | ||
2050 | if (!trans->can_flush_pending_bgs) | |
2051 | return; | |
2052 | ||
2053 | while (!list_empty(&trans->new_bgs)) { | |
2054 | block_group = list_first_entry(&trans->new_bgs, | |
32da5386 | 2055 | struct btrfs_block_group, |
4358d963 JB |
2056 | bg_list); |
2057 | if (ret) | |
2058 | goto next; | |
2059 | ||
2060 | spin_lock(&block_group->lock); | |
de0dc456 DS |
2061 | btrfs_set_stack_block_group_used(&item, block_group->used); |
2062 | btrfs_set_stack_block_group_chunk_objectid(&item, | |
3d976388 | 2063 | BTRFS_FIRST_CHUNK_TREE_OBJECTID); |
de0dc456 | 2064 | btrfs_set_stack_block_group_flags(&item, block_group->flags); |
b3470b5d DS |
2065 | key.objectid = block_group->start; |
2066 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
2067 | key.offset = block_group->length; | |
4358d963 JB |
2068 | spin_unlock(&block_group->lock); |
2069 | ||
2070 | ret = btrfs_insert_item(trans, extent_root, &key, &item, | |
2071 | sizeof(item)); | |
2072 | if (ret) | |
2073 | btrfs_abort_transaction(trans, ret); | |
2074 | ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset); | |
2075 | if (ret) | |
2076 | btrfs_abort_transaction(trans, ret); | |
2077 | add_block_group_free_space(trans, block_group); | |
2078 | /* Already aborted the transaction if it failed. */ | |
2079 | next: | |
2080 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
2081 | list_del_init(&block_group->bg_list); | |
2082 | } | |
2083 | btrfs_trans_release_chunk_metadata(trans); | |
2084 | } | |
2085 | ||
2086 | int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, | |
2087 | u64 type, u64 chunk_offset, u64 size) | |
2088 | { | |
2089 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2090 | struct btrfs_block_group *cache; |
4358d963 JB |
2091 | int ret; |
2092 | ||
2093 | btrfs_set_log_full_commit(trans); | |
2094 | ||
2095 | cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size); | |
2096 | if (!cache) | |
2097 | return -ENOMEM; | |
2098 | ||
bf38be65 | 2099 | cache->used = bytes_used; |
4358d963 JB |
2100 | cache->flags = type; |
2101 | cache->last_byte_to_unpin = (u64)-1; | |
2102 | cache->cached = BTRFS_CACHE_FINISHED; | |
2103 | cache->needs_free_space = 1; | |
2104 | ret = exclude_super_stripes(cache); | |
2105 | if (ret) { | |
2106 | /* We may have excluded something, so call this just in case */ | |
2107 | btrfs_free_excluded_extents(cache); | |
2108 | btrfs_put_block_group(cache); | |
2109 | return ret; | |
2110 | } | |
2111 | ||
2112 | add_new_free_space(cache, chunk_offset, chunk_offset + size); | |
2113 | ||
2114 | btrfs_free_excluded_extents(cache); | |
2115 | ||
2116 | #ifdef CONFIG_BTRFS_DEBUG | |
2117 | if (btrfs_should_fragment_free_space(cache)) { | |
2118 | u64 new_bytes_used = size - bytes_used; | |
2119 | ||
2120 | bytes_used += new_bytes_used >> 1; | |
e11c0406 | 2121 | fragment_free_space(cache); |
4358d963 JB |
2122 | } |
2123 | #endif | |
2124 | /* | |
2125 | * Ensure the corresponding space_info object is created and | |
2126 | * assigned to our block group. We want our bg to be added to the rbtree | |
2127 | * with its ->space_info set. | |
2128 | */ | |
2129 | cache->space_info = btrfs_find_space_info(fs_info, cache->flags); | |
2130 | ASSERT(cache->space_info); | |
2131 | ||
2132 | ret = btrfs_add_block_group_cache(fs_info, cache); | |
2133 | if (ret) { | |
2134 | btrfs_remove_free_space_cache(cache); | |
2135 | btrfs_put_block_group(cache); | |
2136 | return ret; | |
2137 | } | |
2138 | ||
2139 | /* | |
2140 | * Now that our block group has its ->space_info set and is inserted in | |
2141 | * the rbtree, update the space info's counters. | |
2142 | */ | |
2143 | trace_btrfs_add_block_group(fs_info, cache, 1); | |
2144 | btrfs_update_space_info(fs_info, cache->flags, size, bytes_used, | |
2145 | cache->bytes_super, &cache->space_info); | |
2146 | btrfs_update_global_block_rsv(fs_info); | |
2147 | ||
2148 | link_block_group(cache); | |
2149 | ||
2150 | list_add_tail(&cache->bg_list, &trans->new_bgs); | |
2151 | trans->delayed_ref_updates++; | |
2152 | btrfs_update_delayed_refs_rsv(trans); | |
2153 | ||
2154 | set_avail_alloc_bits(fs_info, type); | |
2155 | return 0; | |
2156 | } | |
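 | /* | 
 | * A block group created above is not yet visible in the extent tree: | 
 | * it stays on trans->new_bgs until btrfs_create_pending_block_groups() | 
 | * inserts its item and finishes the chunk allocation. This is why the | 
 | * cache writeout paths below tolerate -ENOENT when updating a block | 
 | * group item. | 
 | */ | 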
26ce2095 JB |
2157 | |
2158 | static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags) | |
2159 | { | |
2160 | u64 num_devices; | |
2161 | u64 stripped; | |
2162 | ||
2163 | /* | |
2164 | * If restripe for this chunk_type is on, pick the target profile | 
2165 | * and return it; otherwise do the usual balance reduction. | 
2166 | */ | |
e11c0406 | 2167 | stripped = get_restripe_target(fs_info, flags); |
26ce2095 JB |
2168 | if (stripped) |
2169 | return extended_to_chunk(stripped); | |
2170 | ||
2171 | num_devices = fs_info->fs_devices->rw_devices; | |
2172 | ||
2173 | stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK | | |
2174 | BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10; | |
2175 | ||
2176 | if (num_devices == 1) { | |
2177 | stripped |= BTRFS_BLOCK_GROUP_DUP; | |
2178 | stripped = flags & ~stripped; | |
2179 | ||
2180 | /* turn raid0 into single device chunks */ | |
2181 | if (flags & BTRFS_BLOCK_GROUP_RAID0) | |
2182 | return stripped; | |
2183 | ||
2184 | /* turn mirroring into duplication */ | |
2185 | if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK | | |
2186 | BTRFS_BLOCK_GROUP_RAID10)) | |
2187 | return stripped | BTRFS_BLOCK_GROUP_DUP; | |
2188 | } else { | |
2189 | /* they already had raid on here, just return */ | |
2190 | if (flags & stripped) | |
2191 | return flags; | |
2192 | ||
2193 | stripped |= BTRFS_BLOCK_GROUP_DUP; | |
2194 | stripped = flags & ~stripped; | |
2195 | ||
2196 | /* switch duplicated blocks with raid1 */ | |
2197 | if (flags & BTRFS_BLOCK_GROUP_DUP) | |
2198 | return stripped | BTRFS_BLOCK_GROUP_RAID1; | |
2199 | ||
2200 | /* this is drive concat, leave it alone */ | |
2201 | } | |
2202 | ||
2203 | return flags; | |
2204 | } | |
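 | /* | 
 | * Example of the reduction above: on a filesystem left with a single | 
 | * rw device, RAID1/RAID10 flags map to DUP and RAID0 maps to single; | 
 | * with multiple devices, DUP maps to RAID1 and existing RAID profiles | 
 | * are returned unchanged. | 
 | */ | 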
2205 | ||
b12de528 QW |
2206 | /* |
2207 | * Mark one block group RO, can be called several times for the same block | |
2208 | * group. | |
2209 | * | |
2210 | * @cache: the destination block group | |
2211 | * @do_chunk_alloc: whether we need to do chunk pre-allocation; this is to | 
2212 | * ensure we still have some free space after marking this | |
2213 | * block group RO. | |
2214 | */ | |
2215 | int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, | |
2216 | bool do_chunk_alloc) | |
26ce2095 JB |
2217 | { |
2218 | struct btrfs_fs_info *fs_info = cache->fs_info; | |
2219 | struct btrfs_trans_handle *trans; | |
2220 | u64 alloc_flags; | |
2221 | int ret; | |
2222 | ||
2223 | again: | |
2224 | trans = btrfs_join_transaction(fs_info->extent_root); | |
2225 | if (IS_ERR(trans)) | |
2226 | return PTR_ERR(trans); | |
2227 | ||
2228 | /* | |
2229 | * We're not allowed to set block groups read-only after the dirty | 
2230 | * block group cache has started writing. If it already started, | 
2231 | * back off and let this transaction commit. | 
2232 | */ | |
2233 | mutex_lock(&fs_info->ro_block_group_mutex); | |
2234 | if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) { | |
2235 | u64 transid = trans->transid; | |
2236 | ||
2237 | mutex_unlock(&fs_info->ro_block_group_mutex); | |
2238 | btrfs_end_transaction(trans); | |
2239 | ||
2240 | ret = btrfs_wait_for_commit(fs_info, transid); | |
2241 | if (ret) | |
2242 | return ret; | |
2243 | goto again; | |
2244 | } | |
2245 | ||
b12de528 | 2246 | if (do_chunk_alloc) { |
26ce2095 | 2247 | /* |
b12de528 QW |
2248 | * If we are changing raid levels, try to allocate a |
2249 | * corresponding block group with the new raid level. | |
26ce2095 | 2250 | */ |
b12de528 QW |
2251 | alloc_flags = update_block_group_flags(fs_info, cache->flags); |
2252 | if (alloc_flags != cache->flags) { | |
2253 | ret = btrfs_chunk_alloc(trans, alloc_flags, | |
2254 | CHUNK_ALLOC_FORCE); | |
2255 | /* | |
2256 | * ENOSPC is allowed here, we may have enough space | |
2257 | * already allocated at the new raid level to carry on | |
2258 | */ | |
2259 | if (ret == -ENOSPC) | |
2260 | ret = 0; | |
2261 | if (ret < 0) | |
2262 | goto out; | |
2263 | } | |
26ce2095 JB |
2264 | } |
2265 | ||
a7a63acc | 2266 | ret = inc_block_group_ro(cache, 0); |
b12de528 QW |
2267 | if (!do_chunk_alloc) |
2268 | goto unlock_out; | |
26ce2095 JB |
2269 | if (!ret) |
2270 | goto out; | |
2271 | alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags); | |
2272 | ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); | |
2273 | if (ret < 0) | |
2274 | goto out; | |
e11c0406 | 2275 | ret = inc_block_group_ro(cache, 0); |
26ce2095 JB |
2276 | out: |
2277 | if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { | |
2278 | alloc_flags = update_block_group_flags(fs_info, cache->flags); | |
2279 | mutex_lock(&fs_info->chunk_mutex); | |
2280 | check_system_chunk(trans, alloc_flags); | |
2281 | mutex_unlock(&fs_info->chunk_mutex); | |
2282 | } | |
b12de528 | 2283 | unlock_out: |
26ce2095 JB |
2284 | mutex_unlock(&fs_info->ro_block_group_mutex); |
2285 | ||
2286 | btrfs_end_transaction(trans); | |
2287 | return ret; | |
2288 | } | |
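 | /* | 
 | * The do_chunk_alloc flag above reflects the two kinds of callers: | 
 | * balance/relocation pre-allocates a fallback chunk so writes can | 
 | * continue while the group is read-only, while a read-only user such | 
 | * as scrub passes false and skips the allocation. | 
 | */ | 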
2289 | ||
32da5386 | 2290 | void btrfs_dec_block_group_ro(struct btrfs_block_group *cache) |
26ce2095 JB |
2291 | { |
2292 | struct btrfs_space_info *sinfo = cache->space_info; | |
2293 | u64 num_bytes; | |
2294 | ||
2295 | BUG_ON(!cache->ro); | |
2296 | ||
2297 | spin_lock(&sinfo->lock); | |
2298 | spin_lock(&cache->lock); | |
2299 | if (!--cache->ro) { | |
b3470b5d | 2300 | num_bytes = cache->length - cache->reserved - |
bf38be65 | 2301 | cache->pinned - cache->bytes_super - cache->used; |
26ce2095 JB |
2302 | sinfo->bytes_readonly -= num_bytes; |
2303 | list_del_init(&cache->ro_list); | |
2304 | } | |
2305 | spin_unlock(&cache->lock); | |
2306 | spin_unlock(&sinfo->lock); | |
2307 | } | |
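 | /* | 
 | * The num_bytes computed above is the group's remaining free space: | 
 | * | 
 | *	free = length - reserved - pinned - bytes_super - used | 
 | * | 
 | * It was counted in sinfo->bytes_readonly while the group was RO and | 
 | * returns to the writable pool once the last RO holder drops out. | 
 | */ | 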
77745c05 JB |
2308 | |
2309 | static int write_one_cache_group(struct btrfs_trans_handle *trans, | |
2310 | struct btrfs_path *path, | |
32da5386 | 2311 | struct btrfs_block_group *cache) |
77745c05 JB |
2312 | { |
2313 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
2314 | int ret; | |
2315 | struct btrfs_root *extent_root = fs_info->extent_root; | |
2316 | unsigned long bi; | |
2317 | struct extent_buffer *leaf; | |
bf38be65 | 2318 | struct btrfs_block_group_item bgi; |
b3470b5d DS |
2319 | struct btrfs_key key; |
2320 | ||
2321 | key.objectid = cache->start; | |
2322 | key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | |
2323 | key.offset = cache->length; | |
77745c05 | 2324 | |
b3470b5d | 2325 | ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1); |
77745c05 JB |
2326 | if (ret) { |
2327 | if (ret > 0) | |
2328 | ret = -ENOENT; | |
2329 | goto fail; | |
2330 | } | |
2331 | ||
2332 | leaf = path->nodes[0]; | |
2333 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | |
de0dc456 DS |
2334 | btrfs_set_stack_block_group_used(&bgi, cache->used); |
2335 | btrfs_set_stack_block_group_chunk_objectid(&bgi, | |
3d976388 | 2336 | BTRFS_FIRST_CHUNK_TREE_OBJECTID); |
de0dc456 | 2337 | btrfs_set_stack_block_group_flags(&bgi, cache->flags); |
bf38be65 | 2338 | write_extent_buffer(leaf, &bgi, bi, sizeof(bgi)); |
77745c05 JB |
2339 | btrfs_mark_buffer_dirty(leaf); |
2340 | fail: | |
2341 | btrfs_release_path(path); | |
2342 | return ret; | |
2343 | ||
2344 | } | |
2345 | ||
32da5386 | 2346 | static int cache_save_setup(struct btrfs_block_group *block_group, |
77745c05 JB |
2347 | struct btrfs_trans_handle *trans, |
2348 | struct btrfs_path *path) | |
2349 | { | |
2350 | struct btrfs_fs_info *fs_info = block_group->fs_info; | |
2351 | struct btrfs_root *root = fs_info->tree_root; | |
2352 | struct inode *inode = NULL; | |
2353 | struct extent_changeset *data_reserved = NULL; | |
2354 | u64 alloc_hint = 0; | |
2355 | int dcs = BTRFS_DC_ERROR; | |
2356 | u64 num_pages = 0; | |
2357 | int retries = 0; | |
2358 | int ret = 0; | |
2359 | ||
2360 | /* | |
2361 | * If this block group is smaller than 100 megs, don't bother caching the | 
2362 | * block group. | |
2363 | */ | |
b3470b5d | 2364 | if (block_group->length < (100 * SZ_1M)) { |
77745c05 JB |
2365 | spin_lock(&block_group->lock); |
2366 | block_group->disk_cache_state = BTRFS_DC_WRITTEN; | |
2367 | spin_unlock(&block_group->lock); | |
2368 | return 0; | |
2369 | } | |
2370 | ||
bf31f87f | 2371 | if (TRANS_ABORTED(trans)) |
77745c05 JB |
2372 | return 0; |
2373 | again: | |
2374 | inode = lookup_free_space_inode(block_group, path); | |
2375 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | |
2376 | ret = PTR_ERR(inode); | |
2377 | btrfs_release_path(path); | |
2378 | goto out; | |
2379 | } | |
2380 | ||
2381 | if (IS_ERR(inode)) { | |
2382 | BUG_ON(retries); | |
2383 | retries++; | |
2384 | ||
2385 | if (block_group->ro) | |
2386 | goto out_free; | |
2387 | ||
2388 | ret = create_free_space_inode(trans, block_group, path); | |
2389 | if (ret) | |
2390 | goto out_free; | |
2391 | goto again; | |
2392 | } | |
2393 | ||
2394 | /* | |
2395 | * We want to set the generation to 0, that way if anything goes wrong | |
2396 | * from here on out we know not to trust this cache when we load up next | |
2397 | * time. | |
2398 | */ | |
2399 | BTRFS_I(inode)->generation = 0; | |
2400 | ret = btrfs_update_inode(trans, root, inode); | |
2401 | if (ret) { | |
2402 | /* | |
2403 | * So theoretically we could recover from this, simply set the | |
2404 | * super cache generation to 0 so we know to invalidate the | |
2405 | * cache, but then we'd have to keep track of the block groups | |
2406 | * that fail this way so we know we _have_ to reset this cache | |
2407 | * before the next commit or risk reading stale cache. So to | |
2408 | * limit our exposure to horrible edge cases, let's just abort the | 
2409 | * transaction, this only happens in really bad situations | |
2410 | * anyway. | |
2411 | */ | |
2412 | btrfs_abort_transaction(trans, ret); | |
2413 | goto out_put; | |
2414 | } | |
2415 | WARN_ON(ret); | |
2416 | ||
2417 | /* We've already setup this transaction, go ahead and exit */ | |
2418 | if (block_group->cache_generation == trans->transid && | |
2419 | i_size_read(inode)) { | |
2420 | dcs = BTRFS_DC_SETUP; | |
2421 | goto out_put; | |
2422 | } | |
2423 | ||
2424 | if (i_size_read(inode) > 0) { | |
2425 | ret = btrfs_check_trunc_cache_free_space(fs_info, | |
2426 | &fs_info->global_block_rsv); | |
2427 | if (ret) | |
2428 | goto out_put; | |
2429 | ||
2430 | ret = btrfs_truncate_free_space_cache(trans, NULL, inode); | |
2431 | if (ret) | |
2432 | goto out_put; | |
2433 | } | |
2434 | ||
2435 | spin_lock(&block_group->lock); | |
2436 | if (block_group->cached != BTRFS_CACHE_FINISHED || | |
2437 | !btrfs_test_opt(fs_info, SPACE_CACHE)) { | |
2438 | /* | |
2439 | * don't bother trying to write stuff out _if_ | |
2440 | * a) we're not cached, | |
2441 | * b) we're mounted with the nospace_cache option, | 
2442 | * c) we're using the v2 space_cache (FREE_SPACE_TREE). | 
2443 | */ | |
2444 | dcs = BTRFS_DC_WRITTEN; | |
2445 | spin_unlock(&block_group->lock); | |
2446 | goto out_put; | |
2447 | } | |
2448 | spin_unlock(&block_group->lock); | |
2449 | ||
2450 | /* | |
2451 | * We hit an ENOSPC when setting up the cache in this transaction, just | |
2452 | * skip doing the setup, we've already cleared the cache so we're safe. | |
2453 | */ | |
2454 | if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) { | |
2455 | ret = -ENOSPC; | |
2456 | goto out_put; | |
2457 | } | |
2458 | ||
2459 | /* | |
2460 | * Try to preallocate enough space based on how big the block group is. | |
2461 | * Keep in mind this has to include any pinned space which could end up | |
2462 | * taking up quite a bit since it's not folded into the other space | |
2463 | * cache. | |
2464 | */ | |
b3470b5d | 2465 | num_pages = div_u64(block_group->length, SZ_256M); |
77745c05 JB |
2466 | if (!num_pages) |
2467 | num_pages = 1; | |
2468 | ||
2469 | num_pages *= 16; | |
2470 | num_pages *= PAGE_SIZE; | |
2471 | ||
2472 | ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages); | |
2473 | if (ret) | |
2474 | goto out_put; | |
2475 | ||
2476 | ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, | |
2477 | num_pages, num_pages, | |
2478 | &alloc_hint); | |
2479 | /* | |
2480 | * Our cache requires contiguous chunks so that we don't modify a bunch | |
2481 | * of metadata or split extents when writing the cache out, which means | |
2482 | * we can hit ENOSPC if we are heavily fragmented, in addition to normal | 
2483 | * out of space conditions. So if we hit this just skip setting up any | |
2484 | * other block groups for this transaction, maybe we'll unpin enough | |
2485 | * space the next time around. | |
2486 | */ | |
2487 | if (!ret) | |
2488 | dcs = BTRFS_DC_SETUP; | |
2489 | else if (ret == -ENOSPC) | |
2490 | set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags); | |
2491 | ||
2492 | out_put: | |
2493 | iput(inode); | |
2494 | out_free: | |
2495 | btrfs_release_path(path); | |
2496 | out: | |
2497 | spin_lock(&block_group->lock); | |
2498 | if (!ret && dcs == BTRFS_DC_SETUP) | |
2499 | block_group->cache_generation = trans->transid; | |
2500 | block_group->disk_cache_state = dcs; | |
2501 | spin_unlock(&block_group->lock); | |
2502 | ||
2503 | extent_changeset_free(data_reserved); | |
2504 | return ret; | |
2505 | } | |
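 | /* | 
 | * Sizing example for the preallocation math above (assuming 4K | 
 | * pages): a 1G block group gives num_pages = 1G / 256M = 4, then | 
 | * 4 * 16 = 64 pages, i.e. 256K reserved for the space cache inode; | 
 | * groups under 256M fall back to num_pages = 1 and end up with | 
 | * 16 * 4K = 64K. | 
 | */ | 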
2506 | ||
2507 | int btrfs_setup_space_cache(struct btrfs_trans_handle *trans) | |
2508 | { | |
2509 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2510 | struct btrfs_block_group *cache, *tmp; |
77745c05 JB |
2511 | struct btrfs_transaction *cur_trans = trans->transaction; |
2512 | struct btrfs_path *path; | |
2513 | ||
2514 | if (list_empty(&cur_trans->dirty_bgs) || | |
2515 | !btrfs_test_opt(fs_info, SPACE_CACHE)) | |
2516 | return 0; | |
2517 | ||
2518 | path = btrfs_alloc_path(); | |
2519 | if (!path) | |
2520 | return -ENOMEM; | |
2521 | ||
2522 | /* Could add new block groups, use _safe just in case */ | |
2523 | list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs, | |
2524 | dirty_list) { | |
2525 | if (cache->disk_cache_state == BTRFS_DC_CLEAR) | |
2526 | cache_save_setup(cache, trans, path); | |
2527 | } | |
2528 | ||
2529 | btrfs_free_path(path); | |
2530 | return 0; | |
2531 | } | |
2532 | ||
2533 | /* | |
2534 | * Transaction commit does final block group cache writeback during a critical | |
2535 | * section where nothing is allowed to change the FS. This is required in | |
2536 | * order for the cache to actually match the block group, but can introduce a | |
2537 | * lot of latency into the commit. | |
2538 | * | |
2539 | * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO. | |
2540 | * There's a chance we'll have to redo some of it if the block group changes | |
2541 | * again during the commit, but it greatly reduces the commit latency by | |
2542 | * getting rid of the easy block groups while we're still allowing others to | |
2543 | * join the commit. | |
2544 | */ | |
2545 | int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans) | |
2546 | { | |
2547 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2548 | struct btrfs_block_group *cache; |
77745c05 JB |
2549 | struct btrfs_transaction *cur_trans = trans->transaction; |
2550 | int ret = 0; | |
2551 | int should_put; | |
2552 | struct btrfs_path *path = NULL; | |
2553 | LIST_HEAD(dirty); | |
2554 | struct list_head *io = &cur_trans->io_bgs; | |
2555 | int num_started = 0; | |
2556 | int loops = 0; | |
2557 | ||
2558 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2559 | if (list_empty(&cur_trans->dirty_bgs)) { | |
2560 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2561 | return 0; | |
2562 | } | |
2563 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | |
2564 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2565 | ||
2566 | again: | |
2567 | /* Make sure all the block groups on our dirty list actually exist */ | |
2568 | btrfs_create_pending_block_groups(trans); | |
2569 | ||
2570 | if (!path) { | |
2571 | path = btrfs_alloc_path(); | |
2572 | if (!path) | |
2573 | return -ENOMEM; | |
2574 | } | |
2575 | ||
2576 | /* | |
2577 | * cache_write_mutex is here only to save us from balance or automatic | |
2578 | * removal of empty block groups deleting this block group while we are | |
2579 | * writing out the cache | |
2580 | */ | |
2581 | mutex_lock(&trans->transaction->cache_write_mutex); | |
2582 | while (!list_empty(&dirty)) { | |
2583 | bool drop_reserve = true; | |
2584 | ||
32da5386 | 2585 | cache = list_first_entry(&dirty, struct btrfs_block_group, |
77745c05 JB |
2586 | dirty_list); |
2587 | /* | |
2588 | * This can happen if something re-dirties a block group that | |
2589 | * is already under IO. Just wait for it to finish and then do | |
2590 | * it all again | |
2591 | */ | |
2592 | if (!list_empty(&cache->io_list)) { | |
2593 | list_del_init(&cache->io_list); | |
2594 | btrfs_wait_cache_io(trans, cache, path); | |
2595 | btrfs_put_block_group(cache); | |
2596 | } | |
2597 | ||
2598 | ||
2599 | /* | |
2600 | * btrfs_wait_cache_io uses the cache->dirty_list to decide if | |
2601 | * it should update the cache_state. Don't delete until after | |
2602 | * we wait. | |
2603 | * | |
2604 | * Since we're not running in the commit critical section | |
2605 | * we need the dirty_bgs_lock to protect from update_block_group | |
2606 | */ | |
2607 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2608 | list_del_init(&cache->dirty_list); | |
2609 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2610 | ||
2611 | should_put = 1; | |
2612 | ||
2613 | cache_save_setup(cache, trans, path); | |
2614 | ||
2615 | if (cache->disk_cache_state == BTRFS_DC_SETUP) { | |
2616 | cache->io_ctl.inode = NULL; | |
2617 | ret = btrfs_write_out_cache(trans, cache, path); | |
2618 | if (ret == 0 && cache->io_ctl.inode) { | |
2619 | num_started++; | |
2620 | should_put = 0; | |
2621 | ||
2622 | /* | |
2623 | * The cache_write_mutex is protecting the | |
2624 | * io_list, also refer to the definition of | |
2625 | * btrfs_transaction::io_bgs for more details | |
2626 | */ | |
2627 | list_add_tail(&cache->io_list, io); | |
2628 | } else { | |
2629 | /* | |
2630 | * If we failed to write the cache, the | |
2631 | * generation will be bad and life goes on | |
2632 | */ | |
2633 | ret = 0; | |
2634 | } | |
2635 | } | |
2636 | if (!ret) { | |
2637 | ret = write_one_cache_group(trans, path, cache); | |
2638 | /* | |
2639 | * Our block group might still be attached to the list | |
2640 | * of new block groups in the transaction handle of some | |
2641 | * other task (struct btrfs_trans_handle->new_bgs). This | |
2642 | * means its block group item isn't yet in the extent | |
2644 | * tree. If this happens, ignore the error, as we will | 
2644 | * try again later in the critical section of the | |
2645 | * transaction commit. | |
2646 | */ | |
2647 | if (ret == -ENOENT) { | |
2648 | ret = 0; | |
2649 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2650 | if (list_empty(&cache->dirty_list)) { | |
2651 | list_add_tail(&cache->dirty_list, | |
2652 | &cur_trans->dirty_bgs); | |
2653 | btrfs_get_block_group(cache); | |
2654 | drop_reserve = false; | |
2655 | } | |
2656 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2657 | } else if (ret) { | |
2658 | btrfs_abort_transaction(trans, ret); | |
2659 | } | |
2660 | } | |
2661 | ||
2662 | /* If it's not on the io list, we need to put the block group */ | |
2663 | if (should_put) | |
2664 | btrfs_put_block_group(cache); | |
2665 | if (drop_reserve) | |
2666 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
2667 | ||
2668 | if (ret) | |
2669 | break; | |
2670 | ||
2671 | /* | |
2672 | * Avoid blocking other tasks for too long. It might even save | |
2673 | * us from writing caches for block groups that are going to be | |
2674 | * removed. | |
2675 | */ | |
2676 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
2677 | mutex_lock(&trans->transaction->cache_write_mutex); | |
2678 | } | |
2679 | mutex_unlock(&trans->transaction->cache_write_mutex); | |
2680 | ||
2681 | /* | |
2682 | * Go through delayed refs for all the stuff we've just kicked off | |
2683 | * and then loop back (just once) | |
2684 | */ | |
2685 | ret = btrfs_run_delayed_refs(trans, 0); | |
2686 | if (!ret && loops == 0) { | |
2687 | loops++; | |
2688 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2689 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | |
2690 | /* | |
2691 | * dirty_bgs_lock protects us from concurrent block group | |
2692 | * deletes too (not just cache_write_mutex). | |
2693 | */ | |
2694 | if (!list_empty(&dirty)) { | |
2695 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2696 | goto again; | |
2697 | } | |
2698 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2699 | } else if (ret < 0) { | |
2700 | btrfs_cleanup_dirty_bgs(cur_trans, fs_info); | |
2701 | } | |
2702 | ||
2703 | btrfs_free_path(path); | |
2704 | return ret; | |
2705 | } | |
2706 | ||
2707 | int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans) | |
2708 | { | |
2709 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
32da5386 | 2710 | struct btrfs_block_group *cache; |
77745c05 JB |
2711 | struct btrfs_transaction *cur_trans = trans->transaction; |
2712 | int ret = 0; | |
2713 | int should_put; | |
2714 | struct btrfs_path *path; | |
2715 | struct list_head *io = &cur_trans->io_bgs; | |
2716 | int num_started = 0; | |
2717 | ||
2718 | path = btrfs_alloc_path(); | |
2719 | if (!path) | |
2720 | return -ENOMEM; | |
2721 | ||
2722 | /* | |
2723 | * Even though we are in the critical section of the transaction commit, | |
2724 | * we can still have concurrent tasks adding elements to this | |
2725 | * transaction's list of dirty block groups. These tasks correspond to | |
2726 | * endio free space workers started when writeback finishes for a | |
2727 | * space cache, which run inode.c:btrfs_finish_ordered_io(), and can | |
2728 | * allocate new block groups as a result of COWing nodes of the root | |
2729 | * tree when updating the free space inode. The writeback for the space | |
2730 | * caches is triggered by an earlier call to | |
2731 | * btrfs_start_dirty_block_groups() and iterations of the following | |
2732 | * loop. | |
2733 | * Also we want to do the cache_save_setup first and then run the | |
2734 | * delayed refs to make sure we have the best chance at doing this all | |
2735 | * in one shot. | |
2736 | */ | |
2737 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2738 | while (!list_empty(&cur_trans->dirty_bgs)) { | |
2739 | cache = list_first_entry(&cur_trans->dirty_bgs, | |
32da5386 | 2740 | struct btrfs_block_group, |
77745c05 JB |
2741 | dirty_list); |
2742 | ||
2743 | /* | |
2744 | * This can happen if cache_save_setup re-dirties a block group | |
2745 | * that is already under IO. Just wait for it to finish and | |
2746 | * then do it all again | |
2747 | */ | |
2748 | if (!list_empty(&cache->io_list)) { | |
2749 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2750 | list_del_init(&cache->io_list); | |
2751 | btrfs_wait_cache_io(trans, cache, path); | |
2752 | btrfs_put_block_group(cache); | |
2753 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2754 | } | |
2755 | ||
2756 | /* | |
2757 | * Don't remove from the dirty list until after we've waited on | |
2758 | * any pending IO | |
2759 | */ | |
2760 | list_del_init(&cache->dirty_list); | |
2761 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2762 | should_put = 1; | |
2763 | ||
2764 | cache_save_setup(cache, trans, path); | |
2765 | ||
2766 | if (!ret) | |
2767 | ret = btrfs_run_delayed_refs(trans, | |
2768 | (unsigned long) -1); | |
2769 | ||
2770 | if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) { | |
2771 | cache->io_ctl.inode = NULL; | |
2772 | ret = btrfs_write_out_cache(trans, cache, path); | |
2773 | if (ret == 0 && cache->io_ctl.inode) { | |
2774 | num_started++; | |
2775 | should_put = 0; | |
2776 | list_add_tail(&cache->io_list, io); | |
2777 | } else { | |
2778 | /* | |
2779 | * If we failed to write the cache, the | |
2780 | * generation will be bad and life goes on | |
2781 | */ | |
2782 | ret = 0; | |
2783 | } | |
2784 | } | |
2785 | if (!ret) { | |
2786 | ret = write_one_cache_group(trans, path, cache); | |
2787 | /* | |
2788 | * One of the free space endio workers might have | |
2789 | * created a new block group while updating a free space | |
2790 | * cache's inode (at inode.c:btrfs_finish_ordered_io()) | |
2791 | * and hasn't released its transaction handle yet, in | |
2792 | * which case the new block group is still attached to | |
2793 | * its transaction handle and its creation has not | |
2794 | * finished yet (no block group item in the extent tree | |
2795 | * yet, etc). If this is the case, wait for all free | |
2796 | * space endio workers to finish and retry. This is a | 
2797 | * very rare case so no need for a more efficient and | 
2798 | * complex approach. | |
2799 | */ | |
2800 | if (ret == -ENOENT) { | |
2801 | wait_event(cur_trans->writer_wait, | |
2802 | atomic_read(&cur_trans->num_writers) == 1); | |
2803 | ret = write_one_cache_group(trans, path, cache); | |
2804 | } | |
2805 | if (ret) | |
2806 | btrfs_abort_transaction(trans, ret); | |
2807 | } | |
2808 | ||
2809 | /* If it's not on the io list, we need to put the block group */ | 
2810 | if (should_put) | |
2811 | btrfs_put_block_group(cache); | |
2812 | btrfs_delayed_refs_rsv_release(fs_info, 1); | |
2813 | spin_lock(&cur_trans->dirty_bgs_lock); | |
2814 | } | |
2815 | spin_unlock(&cur_trans->dirty_bgs_lock); | |
2816 | ||
2817 | /* | |
2818 | * Refer to the definition of io_bgs member for details why it's safe | |
2819 | * to use it without any locking | |
2820 | */ | |
2821 | while (!list_empty(io)) { | |
32da5386 | 2822 | cache = list_first_entry(io, struct btrfs_block_group, |
77745c05 JB |
2823 | io_list); |
2824 | list_del_init(&cache->io_list); | |
2825 | btrfs_wait_cache_io(trans, cache, path); | |
2826 | btrfs_put_block_group(cache); | |
2827 | } | |
2828 | ||
2829 | btrfs_free_path(path); | |
2830 | return ret; | |
2831 | } | |
606d1bf1 JB |
2832 | |
2833 | int btrfs_update_block_group(struct btrfs_trans_handle *trans, | |
2834 | u64 bytenr, u64 num_bytes, int alloc) | |
2835 | { | |
2836 | struct btrfs_fs_info *info = trans->fs_info; | |
32da5386 | 2837 | struct btrfs_block_group *cache = NULL; |
606d1bf1 JB |
2838 | u64 total = num_bytes; |
2839 | u64 old_val; | |
2840 | u64 byte_in_group; | |
2841 | int factor; | |
2842 | int ret = 0; | |
2843 | ||
2844 | /* Block accounting for super block */ | |
2845 | spin_lock(&info->delalloc_root_lock); | |
2846 | old_val = btrfs_super_bytes_used(info->super_copy); | |
2847 | if (alloc) | |
2848 | old_val += num_bytes; | |
2849 | else | |
2850 | old_val -= num_bytes; | |
2851 | btrfs_set_super_bytes_used(info->super_copy, old_val); | |
2852 | spin_unlock(&info->delalloc_root_lock); | |
2853 | ||
2854 | while (total) { | |
2855 | cache = btrfs_lookup_block_group(info, bytenr); | |
2856 | if (!cache) { | |
2857 | ret = -ENOENT; | |
2858 | break; | |
2859 | } | |
2860 | factor = btrfs_bg_type_to_factor(cache->flags); | |
2861 | ||
2862 | /* | |
2863 | * If this block group has free space cache written out, we | |
2864 | * need to make sure to load it if we are removing space. This | |
2865 | * is because we need the unpinning stage to actually add the | |
2866 | * space back to the block group, otherwise we will leak space. | |
2867 | */ | |
32da5386 | 2868 | if (!alloc && !btrfs_block_group_done(cache)) |
606d1bf1 JB |
2869 | btrfs_cache_block_group(cache, 1); |
2870 | ||
b3470b5d DS |
2871 | byte_in_group = bytenr - cache->start; |
2872 | WARN_ON(byte_in_group > cache->length); | |
606d1bf1 JB |
2873 | |
2874 | spin_lock(&cache->space_info->lock); | |
2875 | spin_lock(&cache->lock); | |
2876 | ||
2877 | if (btrfs_test_opt(info, SPACE_CACHE) && | |
2878 | cache->disk_cache_state < BTRFS_DC_CLEAR) | |
2879 | cache->disk_cache_state = BTRFS_DC_CLEAR; | |
2880 | ||
bf38be65 | 2881 | old_val = cache->used; |
b3470b5d | 2882 | num_bytes = min(total, cache->length - byte_in_group); |
606d1bf1 JB |
2883 | if (alloc) { |
2884 | old_val += num_bytes; | |
bf38be65 | 2885 | cache->used = old_val; |
606d1bf1 JB |
2886 | cache->reserved -= num_bytes; |
2887 | cache->space_info->bytes_reserved -= num_bytes; | |
2888 | cache->space_info->bytes_used += num_bytes; | |
2889 | cache->space_info->disk_used += num_bytes * factor; | |
2890 | spin_unlock(&cache->lock); | |
2891 | spin_unlock(&cache->space_info->lock); | |
2892 | } else { | |
2893 | old_val -= num_bytes; | |
bf38be65 | 2894 | cache->used = old_val; |
606d1bf1 JB |
2895 | cache->pinned += num_bytes; |
2896 | btrfs_space_info_update_bytes_pinned(info, | |
2897 | cache->space_info, num_bytes); | |
2898 | cache->space_info->bytes_used -= num_bytes; | |
2899 | cache->space_info->disk_used -= num_bytes * factor; | |
2900 | spin_unlock(&cache->lock); | |
2901 | spin_unlock(&cache->space_info->lock); | |
2902 | ||
606d1bf1 JB |
2903 | percpu_counter_add_batch( |
2904 | &cache->space_info->total_bytes_pinned, | |
2905 | num_bytes, | |
2906 | BTRFS_TOTAL_BYTES_PINNED_BATCH); | |
fe119a6e | 2907 | set_extent_dirty(&trans->transaction->pinned_extents, |
606d1bf1 JB |
2908 | bytenr, bytenr + num_bytes - 1, |
2909 | GFP_NOFS | __GFP_NOFAIL); | |
2910 | } | |
2911 | ||
2912 | spin_lock(&trans->transaction->dirty_bgs_lock); | |
2913 | if (list_empty(&cache->dirty_list)) { | |
2914 | list_add_tail(&cache->dirty_list, | |
2915 | &trans->transaction->dirty_bgs); | |
2916 | trans->delayed_ref_updates++; | |
2917 | btrfs_get_block_group(cache); | |
2918 | } | |
2919 | spin_unlock(&trans->transaction->dirty_bgs_lock); | |
2920 | ||
2921 | /* | |
2922 | * No longer have used bytes in this block group, queue it for | |
2923 | * deletion. We do this after adding the block group to the | |
2924 | * dirty list to avoid races between cleaner kthread and space | |
2925 | * cache writeout. | |
2926 | */ | |
6e80d4f8 DZ |
2927 | if (!alloc && old_val == 0) { |
2928 | if (!btrfs_test_opt(info, DISCARD_ASYNC)) | |
2929 | btrfs_mark_bg_unused(cache); | |
2930 | } | |
606d1bf1 JB |
2931 | |
2932 | btrfs_put_block_group(cache); | |
2933 | total -= num_bytes; | |
2934 | bytenr += num_bytes; | |
2935 | } | |
2936 | ||
2937 | /* Modified block groups are accounted for in the delayed_refs_rsv. */ | |
2938 | btrfs_update_delayed_refs_rsv(trans); | |
2939 | return ret; | |
2940 | } | |
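
In the branches above, bytes_used moves by num_bytes while disk_used moves by num_bytes * factor, because duplicating profiles store every logical byte more than once. A minimal userspace sketch (not kernel code), assuming a factor of 2 for DUP/RAID1/RAID10 as btrfs_bg_type_to_factor() returns at this point in history:

/* Sketch: how the profile factor scales logical usage into on-disk usage. */
#include <stdio.h>
#include <stdint.h>

static int bg_factor(int duplicating_profile)
{
	return duplicating_profile ? 2 : 1;	/* assumed factor values */
}

int main(void)
{
	uint64_t num_bytes = 16 * 1024;		/* one 16KiB extent */

	/* SINGLE: disk_used grows by exactly num_bytes. */
	printf("single: +%llu\n", (unsigned long long)(num_bytes * bg_factor(0)));
	/* RAID1: every logical byte occupies two physical bytes. */
	printf("raid1:  +%llu\n", (unsigned long long)(num_bytes * bg_factor(1)));
	return 0;
}
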
2941 | ||
2942 | /** | |
2943 | * btrfs_add_reserved_bytes - update the block_group and space info counters | |
2944 | * @cache: The cache we are manipulating | |
2945 | * @ram_bytes: The number of bytes of file content; this will be the same | |
2946 | * as @num_bytes except on the compression path. | |
2947 | * @num_bytes: The number of bytes in question | |
2948 | * @delalloc: The blocks are allocated for the delalloc write | |
2949 | * | |
2950 | * This is called by the allocator when it reserves space. If the block | |
2951 | * group has become read-only, we cannot make the reservation and this | |
2952 | * function returns -EAGAIN; otherwise it always succeeds. | |
2953 | */ | |
32da5386 | 2954 | int btrfs_add_reserved_bytes(struct btrfs_block_group *cache, |
606d1bf1 JB |
2955 | u64 ram_bytes, u64 num_bytes, int delalloc) |
2956 | { | |
2957 | struct btrfs_space_info *space_info = cache->space_info; | |
2958 | int ret = 0; | |
2959 | ||
2960 | spin_lock(&space_info->lock); | |
2961 | spin_lock(&cache->lock); | |
2962 | if (cache->ro) { | |
2963 | ret = -EAGAIN; | |
2964 | } else { | |
2965 | cache->reserved += num_bytes; | |
2966 | space_info->bytes_reserved += num_bytes; | |
a43c3835 JB |
2967 | trace_btrfs_space_reservation(cache->fs_info, "space_info", |
2968 | space_info->flags, num_bytes, 1); | |
606d1bf1 JB |
2969 | btrfs_space_info_update_bytes_may_use(cache->fs_info, |
2970 | space_info, -ram_bytes); | |
2971 | if (delalloc) | |
2972 | cache->delalloc_bytes += num_bytes; | |
2973 | } | |
2974 | spin_unlock(&cache->lock); | |
2975 | spin_unlock(&space_info->lock); | |
2976 | return ret; | |
2977 | } | |
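
A hypothetical caller sketch for the compression case described in the comment above: the delalloc reservation was made for the uncompressed length, so bytes_may_use drops by @ram_bytes while only @num_bytes are reserved. The sizes are made up for illustration and this is not a real kernel code path:

static int reserve_compressed_extent(struct btrfs_block_group *cache)
{
	u64 ram_bytes = SZ_128K;	/* uncompressed file content */
	u64 num_bytes = SZ_32K;		/* compressed size on disk */

	/* Returns -EAGAIN if the block group went read-only. */
	return btrfs_add_reserved_bytes(cache, ram_bytes, num_bytes, 1);
}
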
2978 | ||
2979 | /** | |
2980 | * btrfs_free_reserved_bytes - update the block_group and space info counters | |
2981 | * @cache: The cache we are manipulating | |
2982 | * @num_bytes: The number of bytes in question | |
2983 | * @delalloc: The blocks are allocated for the delalloc write | |
2984 | * | |
2985 | * This is called by somebody who is freeing space that was never actually used | |
2986 | * on disk. For example, if you reserve some space for a new leaf in transaction | |
2987 | * A and, before transaction A commits, you free that leaf, you call this | |
2988 | * function to clear the reservation. | |
2989 | */ | |
32da5386 | 2990 | void btrfs_free_reserved_bytes(struct btrfs_block_group *cache, |
606d1bf1 JB |
2991 | u64 num_bytes, int delalloc) |
2992 | { | |
2993 | struct btrfs_space_info *space_info = cache->space_info; | |
2994 | ||
2995 | spin_lock(&space_info->lock); | |
2996 | spin_lock(&cache->lock); | |
2997 | if (cache->ro) | |
2998 | space_info->bytes_readonly += num_bytes; | |
2999 | cache->reserved -= num_bytes; | |
3000 | space_info->bytes_reserved -= num_bytes; | |
3001 | space_info->max_extent_size = 0; | |
3002 | ||
3003 | if (delalloc) | |
3004 | cache->delalloc_bytes -= num_bytes; | |
3005 | spin_unlock(&cache->lock); | |
3006 | spin_unlock(&space_info->lock); | |
3007 | } | |
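
And the matching release path, as a hypothetical sketch: a reservation that is abandoned before anything is written must come back through btrfs_free_reserved_bytes() with the same delalloc flag, or the counters above leak:

static void reserve_then_abandon(struct btrfs_block_group *bg, u64 bytes)
{
	if (btrfs_add_reserved_bytes(bg, bytes, bytes, 0))
		return;			/* -EAGAIN: group went read-only */

	/* ... decide not to use the space after all ... */

	btrfs_free_reserved_bytes(bg, bytes, 0);
}
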
07730d87 JB |
3008 | |
3009 | static void force_metadata_allocation(struct btrfs_fs_info *info) | |
3010 | { | |
3011 | struct list_head *head = &info->space_info; | |
3012 | struct btrfs_space_info *found; | |
3013 | ||
3014 | rcu_read_lock(); | |
3015 | list_for_each_entry_rcu(found, head, list) { | |
3016 | if (found->flags & BTRFS_BLOCK_GROUP_METADATA) | |
3017 | found->force_alloc = CHUNK_ALLOC_FORCE; | |
3018 | } | |
3019 | rcu_read_unlock(); | |
3020 | } | |
3021 | ||
3022 | static int should_alloc_chunk(struct btrfs_fs_info *fs_info, | |
3023 | struct btrfs_space_info *sinfo, int force) | |
3024 | { | |
3025 | u64 bytes_used = btrfs_space_info_used(sinfo, false); | |
3026 | u64 thresh; | |
3027 | ||
3028 | if (force == CHUNK_ALLOC_FORCE) | |
3029 | return 1; | |
3030 | ||
3031 | /* | |
3032 | * In limited mode, we want to have some free space, up to | |
3033 | * about 1% of the FS size. | |
3034 | */ | |
3035 | if (force == CHUNK_ALLOC_LIMITED) { | |
3036 | thresh = btrfs_super_total_bytes(fs_info->super_copy); | |
3037 | thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1)); | |
3038 | ||
3039 | if (sinfo->total_bytes - bytes_used < thresh) | |
3040 | return 1; | |
3041 | } | |
3042 | ||
3043 | if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8)) | |
3044 | return 0; | |
3045 | return 1; | |
3046 | } | |
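
To make the two rules above concrete: div_factor_fine(n, 1) is n / 100 and div_factor(n, 8) is n * 8 / 10 (per misc.h), so limited mode wants max(64M, 1% of the FS) of slack, and the default rule allocates once usage plus 2M crosses 80% of the space_info. A standalone userspace sketch with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t fs_total = 1ULL << 40;		/* 1 TiB filesystem */
	uint64_t si_total = 10ULL << 30;	/* 10 GiB in this space_info */
	uint64_t used = 9ULL << 30;		/* 9 GiB used */

	uint64_t limited = fs_total / 100;	/* div_factor_fine(x, 1) */
	if (limited < (64ULL << 20))
		limited = 64ULL << 20;		/* max_t(u64, SZ_64M, ...) */

	printf("limited-mode slack wanted: %llu\n", (unsigned long long)limited);
	/* 9G + 2M >= 8G, so a new chunk would be allocated here. */
	printf("default rule: alloc=%d\n",
	       (int)(used + (2ULL << 20) >= si_total * 8 / 10));
	return 0;
}
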
3047 | ||
3048 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type) | |
3049 | { | |
3050 | u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type); | |
3051 | ||
3052 | return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE); | |
3053 | } | |
3054 | ||
3055 | /* | |
3056 | * If force is CHUNK_ALLOC_FORCE: | |
3057 | * - return 1 if it successfully allocates a chunk, | |
3058 | * - return errors including -ENOSPC otherwise. | |
3059 | * If force is NOT CHUNK_ALLOC_FORCE: | |
3060 | * - return 0 if it doesn't need to allocate a new chunk, | |
3061 | * - return 1 if it successfully allocates a chunk, | |
3062 | * - return errors including -ENOSPC otherwise. | |
3063 | */ | |
3064 | int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags, | |
3065 | enum btrfs_chunk_alloc_enum force) | |
3066 | { | |
3067 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
3068 | struct btrfs_space_info *space_info; | |
3069 | bool wait_for_alloc = false; | |
3070 | bool should_alloc = false; | |
3071 | int ret = 0; | |
3072 | ||
3073 | /* Don't re-enter if we're already allocating a chunk */ | |
3074 | if (trans->allocating_chunk) | |
3075 | return -ENOSPC; | |
3076 | ||
3077 | space_info = btrfs_find_space_info(fs_info, flags); | |
3078 | ASSERT(space_info); | |
3079 | ||
3080 | do { | |
3081 | spin_lock(&space_info->lock); | |
3082 | if (force < space_info->force_alloc) | |
3083 | force = space_info->force_alloc; | |
3084 | should_alloc = should_alloc_chunk(fs_info, space_info, force); | |
3085 | if (space_info->full) { | |
3086 | /* No more free physical space */ | |
3087 | if (should_alloc) | |
3088 | ret = -ENOSPC; | |
3089 | else | |
3090 | ret = 0; | |
3091 | spin_unlock(&space_info->lock); | |
3092 | return ret; | |
3093 | } else if (!should_alloc) { | |
3094 | spin_unlock(&space_info->lock); | |
3095 | return 0; | |
3096 | } else if (space_info->chunk_alloc) { | |
3097 | /* | |
3098 | * Someone is already allocating, so we need to block | |
3099 | * until they are finished and then loop to | |
3100 | * recheck if we should continue with our allocation | |
3101 | * attempt. | |
3102 | */ | |
3103 | wait_for_alloc = true; | |
3104 | spin_unlock(&space_info->lock); | |
3105 | mutex_lock(&fs_info->chunk_mutex); | |
3106 | mutex_unlock(&fs_info->chunk_mutex); | |
3107 | } else { | |
3108 | /* Proceed with allocation */ | |
3109 | space_info->chunk_alloc = 1; | |
3110 | wait_for_alloc = false; | |
3111 | spin_unlock(&space_info->lock); | |
3112 | } | |
3113 | ||
3114 | cond_resched(); | |
3115 | } while (wait_for_alloc); | |
3116 | ||
3117 | mutex_lock(&fs_info->chunk_mutex); | |
3118 | trans->allocating_chunk = true; | |
3119 | ||
3120 | /* | |
3121 | * If we have mixed data/metadata chunks we want to make sure we keep | |
3122 | * allocating mixed chunks instead of individual chunks. | |
3123 | */ | |
3124 | if (btrfs_mixed_space_info(space_info)) | |
3125 | flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA); | |
3126 | ||
3127 | /* | |
3128 | * If we're allocating a data chunk, make sure that | |
3129 | * we keep a reasonable number of metadata chunks allocated in the | |
3130 | * FS as well. | |
3131 | */ | |
3132 | if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) { | |
3133 | fs_info->data_chunk_allocations++; | |
3134 | if (!(fs_info->data_chunk_allocations % | |
3135 | fs_info->metadata_ratio)) | |
3136 | force_metadata_allocation(fs_info); | |
3137 | } | |
3138 | ||
3139 | /* | |
3140 | * Check if we have enough space in SYSTEM chunk because we may need | |
3141 | * to update devices. | |
3142 | */ | |
3143 | check_system_chunk(trans, flags); | |
3144 | ||
3145 | ret = btrfs_alloc_chunk(trans, flags); | |
3146 | trans->allocating_chunk = false; | |
3147 | ||
3148 | spin_lock(&space_info->lock); | |
3149 | if (ret < 0) { | |
3150 | if (ret == -ENOSPC) | |
3151 | space_info->full = 1; | |
3152 | else | |
3153 | goto out; | |
3154 | } else { | |
3155 | ret = 1; | |
3156 | space_info->max_extent_size = 0; | |
3157 | } | |
3158 | ||
3159 | space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; | |
3160 | out: | |
3161 | space_info->chunk_alloc = 0; | |
3162 | spin_unlock(&space_info->lock); | |
3163 | mutex_unlock(&fs_info->chunk_mutex); | |
3164 | /* | |
3165 | * When we allocate a new chunk we reserve space in the chunk block | |
3166 | * reserve to make sure we can COW nodes/leaves in the chunk tree or | |
3167 | * add new nodes/leaves to it if we end up needing to do it when | |
3168 | * inserting the chunk item and updating device items as part of the | |
3169 | * second phase of chunk allocation, performed by | |
3170 | * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a | |
3171 | * large number of new block groups to create in our transaction | |
3172 | * handle's new_bgs list to avoid exhausting the chunk block reserve | |
3173 | * in extreme cases - like having a single transaction create many new | |
3174 | * block groups when starting to write out the free space caches of all | |
3175 | * the block groups that were made dirty during the lifetime of the | |
3176 | * transaction. | |
3177 | */ | |
3178 | if (trans->chunk_bytes_reserved >= (u64)SZ_2M) | |
3179 | btrfs_create_pending_block_groups(trans); | |
3180 | ||
3181 | return ret; | |
3182 | } | |
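
A hypothetical caller sketch (not an actual kernel code path) consuming the return convention documented before btrfs_chunk_alloc(): with CHUNK_ALLOC_NO_FORCE, 0 means no chunk was needed, 1 means one was allocated, and negative values are errors:

static int ensure_data_space(struct btrfs_trans_handle *trans)
{
	u64 flags = btrfs_get_alloc_profile(trans->fs_info,
					    BTRFS_BLOCK_GROUP_DATA);
	int ret = btrfs_chunk_alloc(trans, flags, CHUNK_ALLOC_NO_FORCE);

	if (ret < 0)
		return ret;	/* e.g. -ENOSPC: no unallocated device space */
	return 0;		/* ret was 0 or 1: there is room either way */
}
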
3183 | ||
3184 | static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type) | |
3185 | { | |
3186 | u64 num_dev; | |
3187 | ||
3188 | num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max; | |
3189 | if (!num_dev) | |
3190 | num_dev = fs_info->fs_devices->rw_devices; | |
3191 | ||
3192 | return num_dev; | |
3193 | } | |
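
For a sense of what the fallback above does, a standalone sketch with illustrative values; the specific devs_max numbers (2 for RAID1, 0 meaning no fixed maximum for striped profiles) are my reading of btrfs_raid_array and should be treated as assumptions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rw_devices = 6;	/* hypothetical writable devices */
	uint64_t devs_max_raid1 = 2;	/* fixed by the profile (assumed) */
	uint64_t devs_max_raid0 = 0;	/* 0 == no fixed maximum (assumed) */

	uint64_t n1 = devs_max_raid1 ? devs_max_raid1 : rw_devices;
	uint64_t n0 = devs_max_raid0 ? devs_max_raid0 : rw_devices;

	printf("raid1 updates %llu device items, raid0 %llu\n",
	       (unsigned long long)n1, (unsigned long long)n0);
	return 0;
}
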
3194 | ||
3195 | /* | |
a9143bd3 | 3196 | * Reserve space in the SYSTEM space_info for allocating or removing a chunk |
07730d87 JB |
3197 | */ |
3198 | void check_system_chunk(struct btrfs_trans_handle *trans, u64 type) | |
3199 | { | |
3200 | struct btrfs_fs_info *fs_info = trans->fs_info; | |
3201 | struct btrfs_space_info *info; | |
3202 | u64 left; | |
3203 | u64 thresh; | |
3204 | int ret = 0; | |
3205 | u64 num_devs; | |
3206 | ||
3207 | /* | |
3208 | * The chunk mutex must be held: we can end up allocating a system chunk, | |
3209 | * and we need an atomic, race-free space reservation in the chunk block reserve. | |
3210 | */ | |
3211 | lockdep_assert_held(&fs_info->chunk_mutex); | |
3212 | ||
3213 | info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM); | |
3214 | spin_lock(&info->lock); | |
3215 | left = info->total_bytes - btrfs_space_info_used(info, true); | |
3216 | spin_unlock(&info->lock); | |
3217 | ||
3218 | num_devs = get_profile_num_devs(fs_info, type); | |
3219 | ||
3220 | /* num_devs device items to update and 1 chunk item to add or remove */ | |
2bd36e7b JB |
3221 | thresh = btrfs_calc_metadata_size(fs_info, num_devs) + |
3222 | btrfs_calc_insert_metadata_size(fs_info, 1); | |
07730d87 JB |
3223 | |
3224 | if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { | |
3225 | btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu", | |
3226 | left, thresh, type); | |
3227 | btrfs_dump_space_info(fs_info, info, 0, 0); | |
3228 | } | |
3229 | ||
3230 | if (left < thresh) { | |
3231 | u64 flags = btrfs_system_alloc_profile(fs_info); | |
3232 | ||
3233 | /* | |
3234 | * Ignore failure to create system chunk. We might end up not | |
3235 | * needing it, as we might not need to COW all nodes/leaves from | |
3236 | * the paths we visit in the chunk tree (they were already COWed | |
3237 | * or created in the current transaction for example). | |
3238 | */ | |
3239 | ret = btrfs_alloc_chunk(trans, flags); | |
3240 | } | |
3241 | ||
3242 | if (!ret) { | |
3243 | ret = btrfs_block_rsv_add(fs_info->chunk_root, | |
3244 | &fs_info->chunk_block_rsv, | |
3245 | thresh, BTRFS_RESERVE_NO_FLUSH); | |
3246 | if (!ret) | |
3247 | trans->chunk_bytes_reserved += thresh; | |
3248 | } | |
3249 | } | |
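
For a sense of scale of the threshold computed above, a standalone sketch with default parameters; it assumes btrfs_calc_metadata_size() is nodesize * BTRFS_MAX_LEVEL * num_items and that the insert variant doubles that per item, which is my reading of ctree.h here and should be treated as an assumption:

#include <stdio.h>
#include <stdint.h>

#define BTRFS_MAX_LEVEL 8

int main(void)
{
	uint64_t nodesize = 16 * 1024;	/* default 16KiB nodes */
	uint64_t num_devs = 2;

	uint64_t update = nodesize * BTRFS_MAX_LEVEL * num_devs;	/* 256KiB */
	uint64_t insert = nodesize * BTRFS_MAX_LEVEL * 2 * 1;		/* 256KiB */

	/* ~512KiB reserved for two device item updates plus one chunk item. */
	printf("thresh = %llu bytes\n", (unsigned long long)(update + insert));
	return 0;
}
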
3250 | ||
3e43c279 JB |
3251 | void btrfs_put_block_group_cache(struct btrfs_fs_info *info) |
3252 | { | |
32da5386 | 3253 | struct btrfs_block_group *block_group; |
3e43c279 JB |
3254 | u64 last = 0; |
3255 | ||
3256 | while (1) { | |
3257 | struct inode *inode; | |
3258 | ||
3259 | block_group = btrfs_lookup_first_block_group(info, last); | |
3260 | while (block_group) { | |
3261 | btrfs_wait_block_group_cache_done(block_group); | |
3262 | spin_lock(&block_group->lock); | |
3263 | if (block_group->iref) | |
3264 | break; | |
3265 | spin_unlock(&block_group->lock); | |
3266 | block_group = btrfs_next_block_group(block_group); | |
3267 | } | |
3268 | if (!block_group) { | |
3269 | if (last == 0) | |
3270 | break; | |
3271 | last = 0; | |
3272 | continue; | |
3273 | } | |
3274 | ||
3275 | inode = block_group->inode; | |
3276 | block_group->iref = 0; | |
3277 | block_group->inode = NULL; | |
3278 | spin_unlock(&block_group->lock); | |
3279 | ASSERT(block_group->io_ctl.inode == NULL); | |
3280 | iput(inode); | |
b3470b5d | 3281 | last = block_group->start + block_group->length; |
3e43c279 JB |
3282 | btrfs_put_block_group(block_group); |
3283 | } | |
3284 | } | |
3285 | ||
3286 | /* | |
3287 | * Must be called only after stopping all workers, since we could have block | |
3288 | * group caching kthreads running, and therefore they could race with us if we | |
3289 | * freed the block groups before stopping them. | |
3290 | */ | |
3291 | int btrfs_free_block_groups(struct btrfs_fs_info *info) | |
3292 | { | |
32da5386 | 3293 | struct btrfs_block_group *block_group; |
3e43c279 JB |
3294 | struct btrfs_space_info *space_info; |
3295 | struct btrfs_caching_control *caching_ctl; | |
3296 | struct rb_node *n; | |
3297 | ||
3298 | down_write(&info->commit_root_sem); | |
3299 | while (!list_empty(&info->caching_block_groups)) { | |
3300 | caching_ctl = list_entry(info->caching_block_groups.next, | |
3301 | struct btrfs_caching_control, list); | |
3302 | list_del(&caching_ctl->list); | |
3303 | btrfs_put_caching_control(caching_ctl); | |
3304 | } | |
3305 | up_write(&info->commit_root_sem); | |
3306 | ||
3307 | spin_lock(&info->unused_bgs_lock); | |
3308 | while (!list_empty(&info->unused_bgs)) { | |
3309 | block_group = list_first_entry(&info->unused_bgs, | |
32da5386 | 3310 | struct btrfs_block_group, |
3e43c279 JB |
3311 | bg_list); |
3312 | list_del_init(&block_group->bg_list); | |
3313 | btrfs_put_block_group(block_group); | |
3314 | } | |
3315 | spin_unlock(&info->unused_bgs_lock); | |
3316 | ||
3317 | spin_lock(&info->block_group_cache_lock); | |
3318 | while ((n = rb_last(&info->block_group_cache_tree)) != NULL) { | |
32da5386 | 3319 | block_group = rb_entry(n, struct btrfs_block_group, |
3e43c279 JB |
3320 | cache_node); |
3321 | rb_erase(&block_group->cache_node, | |
3322 | &info->block_group_cache_tree); | |
3323 | RB_CLEAR_NODE(&block_group->cache_node); | |
3324 | spin_unlock(&info->block_group_cache_lock); | |
3325 | ||
3326 | down_write(&block_group->space_info->groups_sem); | |
3327 | list_del(&block_group->list); | |
3328 | up_write(&block_group->space_info->groups_sem); | |
3329 | ||
3330 | /* | |
3331 | * We haven't cached this block group, which means we may | |
3332 | * still have excluded extents on this block group. | |
3333 | */ | |
3334 | if (block_group->cached == BTRFS_CACHE_NO || | |
3335 | block_group->cached == BTRFS_CACHE_ERROR) | |
3336 | btrfs_free_excluded_extents(block_group); | |
3337 | ||
3338 | btrfs_remove_free_space_cache(block_group); | |
3339 | ASSERT(block_group->cached != BTRFS_CACHE_STARTED); | |
3340 | ASSERT(list_empty(&block_group->dirty_list)); | |
3341 | ASSERT(list_empty(&block_group->io_list)); | |
3342 | ASSERT(list_empty(&block_group->bg_list)); | |
3343 | ASSERT(atomic_read(&block_group->count) == 1); | |
3344 | btrfs_put_block_group(block_group); | |
3345 | ||
3346 | spin_lock(&info->block_group_cache_lock); | |
3347 | } | |
3348 | spin_unlock(&info->block_group_cache_lock); | |
3349 | ||
3350 | /* | |
3351 | * Now that all the block groups are freed, go through and free all the | |
3352 | * space_info structs. This is only called during the final stages of | |
3353 | * unmount, and so we know nobody is using them. We call | |
3354 | * synchronize_rcu() once before we start, just to be on the safe side. | |
3355 | */ | |
3356 | synchronize_rcu(); | |
3357 | ||
3358 | btrfs_release_global_block_rsv(info); | |
3359 | ||
3360 | while (!list_empty(&info->space_info)) { | |
3361 | space_info = list_entry(info->space_info.next, | |
3362 | struct btrfs_space_info, | |
3363 | list); | |
3364 | ||
3365 | /* | |
3366 | * Do not hide this behind enospc_debug, this is actually | |
3367 | * important and indicates a real bug if this happens. | |
3368 | */ | |
3369 | if (WARN_ON(space_info->bytes_pinned > 0 || | |
3370 | space_info->bytes_reserved > 0 || | |
3371 | space_info->bytes_may_use > 0)) | |
3372 | btrfs_dump_space_info(info, space_info, 0, 0); | |
3373 | list_del(&space_info->list); | |
3374 | btrfs_sysfs_remove_space_info(space_info); | |
3375 | } | |
3376 | return 0; | |
3377 | } |
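
The rbtree teardown above follows a common pattern: detach one entry under the lock, then drop the lock for the heavyweight destruction, since that work may sleep or take other locks. A standalone userspace sketch of the same pattern, with a plain list standing in for the rbtree:

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *cache_list;

static void drain_cache(void)
{
	pthread_mutex_lock(&cache_lock);
	while (cache_list) {
		struct node *n = cache_list;

		cache_list = n->next;		/* detach under the lock */
		pthread_mutex_unlock(&cache_lock);

		free(n);			/* "may sleep" work, unlocked */

		pthread_mutex_lock(&cache_lock);
	}
	pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = cache_list;
		cache_list = n;
	}
	drain_cache();
	return 0;
}
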