1 // SPDX-License-Identifier: GPL-2.0
5 #include "space-info.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
13 #include "accessors.h"
14 #include "extent-tree.h"
17 * HOW DOES SPACE RESERVATION WORK
19 * If you want to know about delalloc specifically, there is a separate comment
20 * for that with the delalloc code. This comment is about how the whole system
25 * 1) space_info. This is the ultimate arbiter of how much space we can use.
26 * There's a description of the bytes_ fields with the struct declaration,
27 * refer to that for specifics on each field. Suffice it to say that for
28 * reservations we care about total_bytes - SUM(space_info->bytes_) when
29 * determining if there is space to make an allocation. There is a space_info
30 * for METADATA, SYSTEM, and DATA areas.
32 * 2) block_rsv's. These are basically buckets for every different type of
33 * metadata reservation we have. You can see the comment in the block_rsv
34 * code on the rules for each type, but generally block_rsv->reserved is how
35 * much space is accounted for in space_info->bytes_may_use.
37 * 3) btrfs_calc*_size. These are the worst case calculations we use, based
38 * on the number of items we will want to modify. We have one for changing
39 * items, and one for inserting new items. Generally we use these helpers to
40 * determine the size of the block reserves, and then use the actual bytes
41 * values to adjust the space_info counters.
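 *
 *    As a rough worked example (assuming the default 16KiB nodesize and the
 *    maximum btree height of BTRFS_MAX_LEVEL == 8), inserting a single item
 *    is charged roughly two full tree paths' worth of COW, on the order of
 *    16KiB * 8 * 2 = 256KiB, even though the eventual modification is almost
 *    always much smaller.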
43 * MAKING RESERVATIONS, THE NORMAL CASE
45 * We call into either btrfs_reserve_data_bytes() or
46 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
47 * num_bytes we want to reserve.
50 * space_info->bytes_may_use += num_bytes
53 * Call btrfs_add_reserved_bytes() which does
54 * space_info->bytes_may_use -= num_bytes
55 * space_info->bytes_reserved += extent_bytes
58 * Call btrfs_update_block_group() which does
59 * space_info->bytes_reserved -= extent_bytes
60 * space_info->bytes_used += extent_bytes
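 *
 *    For example (with made up numbers), a 16KiB reservation that ends up
 *    backed by a 16KiB extent moves through the counters as: bytes_may_use
 *    +16KiB at reservation time, then -16KiB from bytes_may_use and +16KiB to
 *    bytes_reserved when the extent is allocated, and finally -16KiB from
 *    bytes_reserved and +16KiB to bytes_used once the reference is inserted.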
62 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
64 * Assume we are unable to simply make the reservation because we do not have
68 * create a reserve_ticket with ->bytes set to our reservation, add it to
69 * the tail of space_info->tickets, kick async flush thread
71 * ->handle_reserve_ticket
72 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
75 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
76 * Flushes various things attempting to free up space.
78 * -> btrfs_try_granting_tickets()
79 * This is called by anything that either subtracts space from
80 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
81 * space_info->total_bytes. This loops through the ->priority_tickets and
82 * then the ->tickets list checking to see if the reservation can be
83 * completed. If it can the space is added to space_info->bytes_may_use and
84 * the ticket is woken up.
87 * Check if ->bytes == 0; if so we got our reservation and we can carry
88 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we
91 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
93 * Same as the above, except we add ourselves to the
94 * space_info->priority_tickets, and we do not use ticket->wait, we simply
95 * call flush_space() ourselves for the states that are safe for us to call
96 * without deadlocking and hope for the best.
100 * Generally speaking we will have two cases for each state, a "nice" state
101 * and a "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
102 * reduce the locking over head on the various trees, and even to keep from
103 * doing any work at all in the case of delayed refs. Each of these delayed
104 * things however hold reservations, and so letting them run allows us to
105 * reclaim space so we can make new reservations.
107 * FLUSH_DELAYED_ITEMS
108 * Every inode has a delayed item to update the inode. Take a simple write
109 * for example: we would update the inode item at write time to update the
110 * mtime, and then again at finish_ordered_io() time in order to update the
111 * isize or bytes. We keep these delayed items to coalesce these operations
112 * into a single operation done on demand. These are an easy way to reclaim
116 * Look at the delalloc comment to get an idea of how much space is reserved
117 * for delayed allocation. We can reclaim some of this space simply by
118 * running delalloc, but usually we need to wait for ordered extents to
119 * reclaim the bulk of this space.
122 * We have a block reserve for the outstanding delayed refs space, and every
123 * delayed ref operation holds a reservation. Running these is a quick way
124 * to reclaim space, but we want to hold this until the end because COW can
125 * churn a lot and we can avoid making some extent tree modifications if we
126 * are able to delay for as long as possible.
129 * We will skip this the first time through space reservation, because of
130 * overcommit, and because we don't want to end up with a lot of useless
131 * metadata space when our worst case reservations will likely never come true.
134 * If we're freeing inodes we're likely freeing checksums, file extent
135 * items, and extent tree items. Loads of space could be freed up by these
136 * operations, however they won't be usable until the transaction commits.
139 * This will commit the transaction. Historically we had a lot of logic
140 * surrounding whether or not we'd commit the transaction, but this was born
141 * out of a pre-tickets era where we could end up committing the transaction
142 * thousands of times in a row without making progress. Now thanks to our
143 * ticketing system we know if we're not making progress and can error
144 * everybody out after a few commits rather than burning the disk hoping for
145 * a different answer.
149 * Because we hold so many reservations for metadata we will allow you to
150 * reserve more space than is currently free in the currently allocated
151 * metadata space. This only happens with metadata, data does not allow
154 * You can see the current logic for when we allow overcommit in
155 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
156 * is no unallocated space to be had, all reservations are kept within the
157 * free space in the allocated metadata chunks.
159 * Because of overcommitting, you generally want to use the
160 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
161 * thing with or without extra unallocated space.
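 *
 *    As a hypothetical example (single profile, made up numbers): with 8GiB of
 *    allocated metadata chunks (total_bytes), 7.5GiB accounted in the bytes_
 *    counters and 20GiB of unallocated disk, a 1GiB reservation fails the
 *    plain total_bytes check but can still be granted because a fraction of
 *    the unallocated space (half of it, or only an eighth when a full flush
 *    is allowed) is counted as available for overcommit.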
164 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
165 bool may_use_included)
168 return s_info->bytes_used + s_info->bytes_reserved +
169 s_info->bytes_pinned + s_info->bytes_readonly +
170 s_info->bytes_zone_unusable +
171 (may_use_included ? s_info->bytes_may_use : 0);
175 * after adding space to the filesystem, we need to clear the full flags
176 * on all the space infos.
178 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
180 struct list_head *head = &info->space_info;
181 struct btrfs_space_info *found;
183 list_for_each_entry(found, head, list)
188 * Block groups with more than this value (percents) of unusable space will be
189 * scheduled for background reclaim.
191 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
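/*
 * For example (illustrative numbers only): on a zoned device with 256MiB
 * zones, a block group crossing roughly 192MiB of zone_unusable space would
 * become a candidate for background reclaim with this default threshold.
 */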
194 * Calculate chunk size depending on volume type (regular or zoned).
196 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
198 if (btrfs_is_zoned(fs_info))
199 return fs_info->zone_size;
201 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
203 if (flags & BTRFS_BLOCK_GROUP_DATA)
204 return BTRFS_MAX_DATA_CHUNK_SIZE;
205 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
208 /* Handle BTRFS_BLOCK_GROUP_METADATA */
209 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
216 * Update default chunk size.
218 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
221 WRITE_ONCE(space_info->chunk_size, chunk_size);
224 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
227 struct btrfs_space_info *space_info;
231 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
235 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
236 INIT_LIST_HEAD(&space_info->block_groups[i]);
237 init_rwsem(&space_info->groups_sem);
238 spin_lock_init(&space_info->lock);
239 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
240 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
241 INIT_LIST_HEAD(&space_info->ro_bgs);
242 INIT_LIST_HEAD(&space_info->tickets);
243 INIT_LIST_HEAD(&space_info->priority_tickets);
244 space_info->clamp = 1;
245 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
247 if (btrfs_is_zoned(info))
248 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
250 ret = btrfs_sysfs_add_space_info_type(info, space_info);
254 list_add(&space_info->list, &info->space_info);
255 if (flags & BTRFS_BLOCK_GROUP_DATA)
256 info->data_sinfo = space_info;
261 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
263 struct btrfs_super_block *disk_super;
269 disk_super = fs_info->super_copy;
270 if (!btrfs_super_root(disk_super))
273 features = btrfs_super_incompat_flags(disk_super);
274 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
277 flags = BTRFS_BLOCK_GROUP_SYSTEM;
278 ret = create_space_info(fs_info, flags);
283 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
284 ret = create_space_info(fs_info, flags);
286 flags = BTRFS_BLOCK_GROUP_METADATA;
287 ret = create_space_info(fs_info, flags);
291 flags = BTRFS_BLOCK_GROUP_DATA;
292 ret = create_space_info(fs_info, flags);
298 void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
299 struct btrfs_block_group *block_group)
301 struct btrfs_space_info *found;
304 factor = btrfs_bg_type_to_factor(block_group->flags);
306 found = btrfs_find_space_info(info, block_group->flags);
308 spin_lock(&found->lock);
309 found->total_bytes += block_group->length;
310 found->disk_total += block_group->length * factor;
311 found->bytes_used += block_group->used;
312 found->disk_used += block_group->used * factor;
313 found->bytes_readonly += block_group->bytes_super;
314 found->bytes_zone_unusable += block_group->zone_unusable;
315 if (block_group->length > 0)
317 btrfs_try_granting_tickets(info, found);
318 spin_unlock(&found->lock);
320 block_group->space_info = found;
322 index = btrfs_bg_flags_to_raid_index(block_group->flags);
323 down_write(&found->groups_sem);
324 list_add_tail(&block_group->list, &found->block_groups[index]);
325 up_write(&found->groups_sem);
328 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
331 struct list_head *head = &info->space_info;
332 struct btrfs_space_info *found;
334 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
336 list_for_each_entry(found, head, list) {
337 if (found->flags & flags)
343 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
344 struct btrfs_space_info *space_info,
345 enum btrfs_reserve_flush_enum flush)
347 struct btrfs_space_info *data_sinfo;
353 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
354 profile = btrfs_system_alloc_profile(fs_info);
356 profile = btrfs_metadata_alloc_profile(fs_info);
358 avail = atomic64_read(&fs_info->free_chunk_space);
361 * If we have dup, raid1 or raid10 then only half of the free
362 * space is actually usable. For raid56, the space info used
363 * doesn't include the parity drive, so we don't have to
366 factor = btrfs_bg_type_to_factor(profile);
367 avail = div_u64(avail, factor);
372 * Calculate the data_chunk_size; space_info->chunk_size is the
373 * "optimal" chunk size based on the fs size. However when we actually
374 * allocate the chunk we will strip this down further, making it no more
375 * than 10% of the disk or 1G, whichever is smaller.
377 * In zoned mode, we need to use zone_size (=
378 * data_sinfo->chunk_size) as it is.
380 data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
381 if (!btrfs_is_zoned(fs_info)) {
382 data_chunk_size = min(data_sinfo->chunk_size,
383 mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
384 data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
386 data_chunk_size = data_sinfo->chunk_size;
390 * Since data allocations immediately use block groups as part of the
391 * reservation, because we assume that data reservations will == actual
392 * usage, we could potentially overcommit and then immediately have that
393 * available space used by a data allocation, which could put us in a
394 * bind when we get close to filling the file system.
396 * To handle this simply remove the data_chunk_size from the available
397 * space. If we are relatively empty this won't affect our ability to
398 * overcommit much, and if we're very close to full it'll keep us from
399 * getting into a position where we've given ourselves very little
400 * metadata wiggle room.
402 if (avail <= data_chunk_size)
404 avail -= data_chunk_size;
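/*
 * Worked example with made up numbers: on a 1TiB filesystem the data chunk
 * size is capped at 1GiB here, so 20GiB of unallocated space is treated as
 * 19GiB before the overcommit fraction below is applied.
 */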
407 * If we aren't flushing all things, let us overcommit up to
408 * half of the space. If we can flush, don't let us overcommit
409 * too much, let it overcommit up to 1/8 of the space.
411 if (flush == BTRFS_RESERVE_FLUSH_ALL)
417 * In zoned mode, we always allocate one zone as one chunk.
418 * Returning non-zone size aligned bytes here will result in
419 * less pressure for the async metadata reclaim process, and it
420 * will over-commit too much leading to ENOSPC. Align down to the
421 * zone size to avoid that.
423 if (btrfs_is_zoned(fs_info))
424 avail = ALIGN_DOWN(avail, fs_info->zone_size);
429 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
430 struct btrfs_space_info *space_info, u64 bytes,
431 enum btrfs_reserve_flush_enum flush)
436 /* Don't overcommit when in mixed mode */
437 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
440 used = btrfs_space_info_used(space_info, true);
441 avail = calc_available_free_space(fs_info, space_info, flush);
443 if (used + bytes < space_info->total_bytes + avail)
448 static void remove_ticket(struct btrfs_space_info *space_info,
449 struct reserve_ticket *ticket)
451 if (!list_empty(&ticket->list)) {
452 list_del_init(&ticket->list);
453 ASSERT(space_info->reclaim_size >= ticket->bytes);
454 space_info->reclaim_size -= ticket->bytes;
459 * This is for space we already have accounted in space_info->bytes_may_use, so
460 * basically when we're returning space from block_rsv's.
462 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
463 struct btrfs_space_info *space_info)
465 struct list_head *head;
466 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
468 lockdep_assert_held(&space_info->lock);
470 head = &space_info->priority_tickets;
472 while (!list_empty(head)) {
473 struct reserve_ticket *ticket;
474 u64 used = btrfs_space_info_used(space_info, true);
476 ticket = list_first_entry(head, struct reserve_ticket, list);
478 /* Check and see if our ticket can be satisfied now. */
479 if ((used + ticket->bytes <= space_info->total_bytes) ||
480 btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
482 btrfs_space_info_update_bytes_may_use(fs_info,
485 remove_ticket(space_info, ticket);
487 space_info->tickets_id++;
488 wake_up(&ticket->wait);
494 if (head == &space_info->priority_tickets) {
495 head = &space_info->tickets;
496 flush = BTRFS_RESERVE_FLUSH_ALL;
501 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
503 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
504 spin_lock(&__rsv->lock); \
505 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
506 __rsv->size, __rsv->reserved); \
507 spin_unlock(&__rsv->lock); \
510 static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
512 switch (space_info->flags) {
513 case BTRFS_BLOCK_GROUP_SYSTEM:
515 case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
516 return "DATA+METADATA";
517 case BTRFS_BLOCK_GROUP_DATA:
519 case BTRFS_BLOCK_GROUP_METADATA:
526 static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
528 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
529 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
530 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
531 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
532 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
535 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
536 struct btrfs_space_info *info)
538 const char *flag_str = space_info_flag_to_str(info);
539 lockdep_assert_held(&info->lock);
541 /* The free space could be negative in case of overcommit */
542 btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
544 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
545 info->full ? "" : "not ");
547 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
548 info->total_bytes, info->bytes_used, info->bytes_pinned,
549 info->bytes_reserved, info->bytes_may_use,
550 info->bytes_readonly, info->bytes_zone_unusable);
553 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
554 struct btrfs_space_info *info, u64 bytes,
555 int dump_block_groups)
557 struct btrfs_block_group *cache;
561 spin_lock(&info->lock);
562 __btrfs_dump_space_info(fs_info, info);
563 dump_global_block_rsv(fs_info);
564 spin_unlock(&info->lock);
566 if (!dump_block_groups)
569 down_read(&info->groups_sem);
571 list_for_each_entry(cache, &info->block_groups[index], list) {
574 spin_lock(&cache->lock);
575 avail = cache->length - cache->used - cache->pinned -
576 cache->reserved - cache->delalloc_bytes -
577 cache->bytes_super - cache->zone_unusable;
579 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
580 cache->start, cache->length, cache->used, cache->pinned,
581 cache->reserved, cache->delalloc_bytes,
582 cache->bytes_super, cache->zone_unusable,
583 avail, cache->ro ? "[readonly]" : "");
584 spin_unlock(&cache->lock);
585 btrfs_dump_free_space(cache, bytes);
586 total_avail += avail;
588 if (++index < BTRFS_NR_RAID_TYPES)
590 up_read(&info->groups_sem);
592 btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail);
595 static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
601 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
602 nr = div64_u64(to_reclaim, bytes);
608 #define EXTENT_SIZE_PER_ITEM SZ_256K
611 * shrink metadata reservation for delalloc
613 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
614 struct btrfs_space_info *space_info,
615 u64 to_reclaim, bool wait_ordered,
618 struct btrfs_trans_handle *trans;
625 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
626 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
627 if (delalloc_bytes == 0 && ordered_bytes == 0)
630 /* Calc the number of pages we need to flush for space reservation */
631 if (to_reclaim == U64_MAX) {
635 * to_reclaim is set to however much metadata we need to
636 * reclaim, but reclaiming that much data doesn't really track
637 * exactly. What we really want to do is reclaim full inodes'
638 * worth of reservations, however that's not available to us
639 * here. We will take a fraction of the delalloc bytes for our
640 * flushing loops and hope for the best. Delalloc will expand
641 * the amount we write to cover an entire dirty extent, which
642 * will reclaim the metadata reservation for that range. If
643 * it's not enough subsequent flush stages will be more
646 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
647 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
650 trans = current->journal_info;
653 * If we are doing more ordered than delalloc we need to just wait on
654 * ordered extents, otherwise we'll waste time trying to flush delalloc
655 * that likely won't give us the space back we need.
657 if (ordered_bytes > delalloc_bytes && !for_preempt)
661 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
662 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
663 long nr_pages = min_t(u64, temp, LONG_MAX);
666 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
669 * We need to make sure any outstanding async pages are now
670 * processed before we continue. This is because things like
671 * sync_inode() try to be smart and skip writing if the inode is
672 * marked clean. We don't use filemap_fdatawrite() for flushing
673 * because we want to control how many pages we write out at a
674 * time, thus this is the only safe way to make sure we've
675 * waited for outstanding compressed workers to have started
676 * their jobs and thus have ordered extents set up properly.
678 * This exists because we do not want to wait for each
679 * individual inode to finish its async work, we simply want to
680 * start the IO on everybody, and then come back here and wait
681 * for all of the async work to catch up. Once we're done with
682 * that we know we'll have ordered extents for everything and we
683 * can decide if we wait for that or not.
685 * If we choose to replace this in the future, make absolutely
686 * sure that the proper waiting is being done in the async case,
687 * as there have been bugs in that area before.
689 async_pages = atomic_read(&fs_info->async_delalloc_pages);
694 * We don't want to wait forever; if we wrote fewer pages in this
695 * loop than we have outstanding, only wait for that number of
696 * pages, otherwise we can wait for all async pages to finish
699 if (async_pages > nr_pages)
700 async_pages -= nr_pages;
703 wait_event(fs_info->async_submit_wait,
704 atomic_read(&fs_info->async_delalloc_pages) <=
708 if (wait_ordered && !trans) {
709 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
711 time_left = schedule_timeout_killable(1);
717 * If we are flushing for preemption we just want a one-shot of delalloc
718 * flushing so we can stop flushing if we decide we don't need
724 spin_lock(&space_info->lock);
725 if (list_empty(&space_info->tickets) &&
726 list_empty(&space_info->priority_tickets)) {
727 spin_unlock(&space_info->lock);
730 spin_unlock(&space_info->lock);
732 delalloc_bytes = percpu_counter_sum_positive(
733 &fs_info->delalloc_bytes);
734 ordered_bytes = percpu_counter_sum_positive(
735 &fs_info->ordered_bytes);
740 * Try to flush some data based on policy set by @state. This is only advisory
741 * and may fail for various reasons. The caller is supposed to examine the
742 * state of @space_info to detect the outcome.
744 static void flush_space(struct btrfs_fs_info *fs_info,
745 struct btrfs_space_info *space_info, u64 num_bytes,
746 enum btrfs_flush_state state, bool for_preempt)
748 struct btrfs_root *root = fs_info->tree_root;
749 struct btrfs_trans_handle *trans;
754 case FLUSH_DELAYED_ITEMS_NR:
755 case FLUSH_DELAYED_ITEMS:
756 if (state == FLUSH_DELAYED_ITEMS_NR)
757 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
761 trans = btrfs_join_transaction_nostart(root);
763 ret = PTR_ERR(trans);
768 ret = btrfs_run_delayed_items_nr(trans, nr);
769 btrfs_end_transaction(trans);
772 case FLUSH_DELALLOC_WAIT:
773 case FLUSH_DELALLOC_FULL:
774 if (state == FLUSH_DELALLOC_FULL)
776 shrink_delalloc(fs_info, space_info, num_bytes,
777 state != FLUSH_DELALLOC, for_preempt);
779 case FLUSH_DELAYED_REFS_NR:
780 case FLUSH_DELAYED_REFS:
781 trans = btrfs_join_transaction_nostart(root);
783 ret = PTR_ERR(trans);
788 if (state == FLUSH_DELAYED_REFS_NR)
789 btrfs_run_delayed_refs(trans, num_bytes);
791 btrfs_run_delayed_refs(trans, 0);
792 btrfs_end_transaction(trans);
795 case ALLOC_CHUNK_FORCE:
796 trans = btrfs_join_transaction(root);
798 ret = PTR_ERR(trans);
801 ret = btrfs_chunk_alloc(trans,
802 btrfs_get_alloc_profile(fs_info, space_info->flags),
803 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
805 btrfs_end_transaction(trans);
807 if (ret > 0 || ret == -ENOSPC)
810 case RUN_DELAYED_IPUTS:
812 * If we have pending delayed iputs then we could free up a
813 * bunch of pinned space, so make sure we run the iputs before
814 * we do our pinned bytes check below.
816 btrfs_run_delayed_iputs(fs_info);
817 btrfs_wait_on_delayed_iputs(fs_info);
820 ASSERT(current->journal_info == NULL);
822 * We don't want to start a new transaction, just attach to the
823 * current one or wait until it fully commits in case its commit is
824 * happening at the moment. Note: we don't use a nostart join
825 * because that does not wait for a transaction to fully commit
826 * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED).
828 trans = btrfs_attach_transaction_barrier(root);
830 ret = PTR_ERR(trans);
835 ret = btrfs_commit_transaction(trans);
842 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
848 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
849 struct btrfs_space_info *space_info)
853 u64 to_reclaim = space_info->reclaim_size;
855 lockdep_assert_held(&space_info->lock);
857 avail = calc_available_free_space(fs_info, space_info,
858 BTRFS_RESERVE_FLUSH_ALL);
859 used = btrfs_space_info_used(space_info, true);
862 * We may be flushing because suddenly we have less space than we had
863 * before, and now we're well over-committed based on our current free
864 * space. If that's the case add in our overage so we make sure to put
865 * appropriate pressure on the flushing state machine.
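 *
 * E.g. with hypothetical numbers: 512MiB of tickets queued (reclaim_size),
 * 12GiB used against 8GiB of total_bytes plus 2GiB of available space adds
 * another 2GiB of overage on top of the queued amount.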
867 if (space_info->total_bytes + avail < used)
868 to_reclaim += used - (space_info->total_bytes + avail);
873 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
874 struct btrfs_space_info *space_info)
876 const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
877 u64 ordered, delalloc;
881 thresh = mult_perc(space_info->total_bytes, 90);
883 lockdep_assert_held(&space_info->lock);
885 /* If we're just plain full then async reclaim just slows us down. */
886 if ((space_info->bytes_used + space_info->bytes_reserved +
887 global_rsv_size) >= thresh)
890 used = space_info->bytes_may_use + space_info->bytes_pinned;
892 /* The total flushable belongs to the global rsv, don't flush. */
893 if (global_rsv_size >= used)
897 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
898 * that devoted to other reservations then there's no sense in flushing,
899 * we don't have a lot of things that need flushing.
901 if (used - global_rsv_size <= SZ_128M)
905 * We have tickets queued, bail so we don't compete with the async
908 if (space_info->reclaim_size)
912 * If we have over half of the free space occupied by reservations or
913 * pinned then we want to start flushing.
915 * We do not do the traditional thing here, which is to say
917 * if (used >= ((total_bytes + avail) / 2))
920 * because this doesn't quite work how we want. If we had more than 50%
921 * of the space_info used by bytes_used and we had 0 available we'd just
922 * constantly run the background flusher. Instead we want it to kick in
923 * if our reclaimable space exceeds our clamped free space.
925 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
928 * Amount of RAM Minimum threshold Maximum threshold
931 * 128GiB 512MiB 64GiB
936 * These are the range our thresholds will fall in, corresponding to how
937 * much delalloc we need for the background flusher to kick in.
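 *
 * E.g. with made up numbers: 16GiB of clamp-eligible free space and a clamp
 * value of 3 means preemptive flushing only kicks in once roughly 2GiB
 * (16GiB >> 3) of reclaimable space has built up.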
940 thresh = calc_available_free_space(fs_info, space_info,
941 BTRFS_RESERVE_FLUSH_ALL);
942 used = space_info->bytes_used + space_info->bytes_reserved +
943 space_info->bytes_readonly + global_rsv_size;
944 if (used < space_info->total_bytes)
945 thresh += space_info->total_bytes - used;
946 thresh >>= space_info->clamp;
948 used = space_info->bytes_pinned;
951 * If we have more ordered bytes than delalloc bytes then we're either
952 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
953 * around. Preemptive flushing is only useful in that it can free up
954 * space before tickets need to wait for things to finish. In the case
955 * of ordered extents, preemptively waiting on ordered extents gets us
956 * nothing, if our reservations are tied up in ordered extents we'll
957 * simply have to slow down writers by forcing them to wait on ordered
960 * In the case that ordered is larger than delalloc, only include the
961 * block reserves that we would actually be able to directly reclaim
962 * from. In this case if we're heavy on metadata operations this will
963 * clearly be heavy enough to warrant preemptive flushing. In the case
964 * of heavy DIO or ordered reservations, preemptive flushing will just
965 * waste time and cause us to slow down.
967 * We want to make sure we truly are maxed out on ordered however, so
968 * cut ordered in half, and if it's still higher than delalloc then we
969 * can keep flushing. This is to avoid the case where we start
970 * flushing, and now delalloc == ordered and we stop preemptively
971 * flushing when we could still have several gigs of delalloc to flush.
973 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
974 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
975 if (ordered >= delalloc)
976 used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
977 btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
979 used += space_info->bytes_may_use - global_rsv_size;
981 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
982 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
985 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
986 struct btrfs_space_info *space_info,
987 struct reserve_ticket *ticket)
989 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
995 if (global_rsv->space_info != space_info)
998 spin_lock(&global_rsv->lock);
999 min_bytes = mult_perc(global_rsv->size, 10);
1000 if (global_rsv->reserved < min_bytes + ticket->bytes) {
1001 spin_unlock(&global_rsv->lock);
1004 global_rsv->reserved -= ticket->bytes;
1005 remove_ticket(space_info, ticket);
1007 wake_up(&ticket->wait);
1008 space_info->tickets_id++;
1009 if (global_rsv->reserved < global_rsv->size)
1010 global_rsv->full = 0;
1011 spin_unlock(&global_rsv->lock);
1017 * We've exhausted our flushing, start failing tickets.
1019 * @fs_info - fs_info for this fs
1020 * @space_info - the space info we were flushing
1022 * We call this when we've exhausted our flushing ability and haven't made
1023 * progress in satisfying tickets. The reservation code handles tickets in
1024 * order, so if there is a large ticket first and then smaller ones we could
1025 * very well satisfy the smaller tickets. This will attempt to wake up any
1026 * tickets in the list to catch this case.
1028 * This function returns true if it was able to make progress by clearing out
1029 * other tickets, or if it stumbles across a ticket that was smaller than the
1032 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
1033 struct btrfs_space_info *space_info)
1035 struct reserve_ticket *ticket;
1036 u64 tickets_id = space_info->tickets_id;
1037 const bool aborted = BTRFS_FS_ERROR(fs_info);
1039 trace_btrfs_fail_all_tickets(fs_info, space_info);
1041 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1042 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
1043 __btrfs_dump_space_info(fs_info, space_info);
1046 while (!list_empty(&space_info->tickets) &&
1047 tickets_id == space_info->tickets_id) {
1048 ticket = list_first_entry(&space_info->tickets,
1049 struct reserve_ticket, list);
1051 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1054 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1055 btrfs_info(fs_info, "failing ticket with %llu bytes",
1058 remove_ticket(space_info, ticket);
1060 ticket->error = -EIO;
1062 ticket->error = -ENOSPC;
1063 wake_up(&ticket->wait);
1066 * We're just throwing tickets away, so more flushing may not
1067 * trip over btrfs_try_granting_tickets, so we need to call it
1068 * here to see if we can make progress with the next ticket in
1072 btrfs_try_granting_tickets(fs_info, space_info);
1074 return (tickets_id != space_info->tickets_id);
1078 * This is for normal flushers, we can wait all goddamned day if we want to. We
1079 * will loop and continuously try to flush as long as we are making progress.
1080 * We count progress as clearing off tickets each time we have to loop.
1082 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1084 struct btrfs_fs_info *fs_info;
1085 struct btrfs_space_info *space_info;
1087 enum btrfs_flush_state flush_state;
1088 int commit_cycles = 0;
1089 u64 last_tickets_id;
1091 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1092 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1094 spin_lock(&space_info->lock);
1095 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1097 space_info->flush = 0;
1098 spin_unlock(&space_info->lock);
1101 last_tickets_id = space_info->tickets_id;
1102 spin_unlock(&space_info->lock);
1104 flush_state = FLUSH_DELAYED_ITEMS_NR;
1106 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1107 spin_lock(&space_info->lock);
1108 if (list_empty(&space_info->tickets)) {
1109 space_info->flush = 0;
1110 spin_unlock(&space_info->lock);
1113 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1115 if (last_tickets_id == space_info->tickets_id) {
1118 last_tickets_id = space_info->tickets_id;
1119 flush_state = FLUSH_DELAYED_ITEMS_NR;
1125 * We do not want to empty the system of delalloc unless we're
1126 * under heavy pressure, so allow one trip through the flushing
1127 * logic before we start doing a FLUSH_DELALLOC_FULL.
1129 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1133 * We don't want to force a chunk allocation until we've tried
1134 * pretty hard to reclaim space. Think of the case where we
1135 * freed up a bunch of space and so have a lot of pinned space
1136 * to reclaim. We would rather use that than possibly create an
1137 * underutilized metadata chunk. So if this is our first run
1138 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1139 * commit the transaction. If nothing has changed the next go
1140 * around then we can force a chunk allocation.
1142 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1145 if (flush_state > COMMIT_TRANS) {
1147 if (commit_cycles > 2) {
1148 if (maybe_fail_all_tickets(fs_info, space_info)) {
1149 flush_state = FLUSH_DELAYED_ITEMS_NR;
1152 space_info->flush = 0;
1155 flush_state = FLUSH_DELAYED_ITEMS_NR;
1158 spin_unlock(&space_info->lock);
1159 } while (flush_state <= COMMIT_TRANS);
1163 * This handles pre-flushing of metadata space before we get to the point that
1164 * we need to start blocking threads on tickets. The logic here is different
1165 * from the other flush paths because it doesn't rely on tickets to tell us how
1166 * much we need to flush, instead it attempts to keep us below the 80% full
1167 * watermark of space by flushing whichever reservation pool is currently the
1170 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1172 struct btrfs_fs_info *fs_info;
1173 struct btrfs_space_info *space_info;
1174 struct btrfs_block_rsv *delayed_block_rsv;
1175 struct btrfs_block_rsv *delayed_refs_rsv;
1176 struct btrfs_block_rsv *global_rsv;
1177 struct btrfs_block_rsv *trans_rsv;
1180 fs_info = container_of(work, struct btrfs_fs_info,
1181 preempt_reclaim_work);
1182 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1183 delayed_block_rsv = &fs_info->delayed_block_rsv;
1184 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1185 global_rsv = &fs_info->global_block_rsv;
1186 trans_rsv = &fs_info->trans_block_rsv;
1188 spin_lock(&space_info->lock);
1189 while (need_preemptive_reclaim(fs_info, space_info)) {
1190 enum btrfs_flush_state flush;
1191 u64 delalloc_size = 0;
1192 u64 to_reclaim, block_rsv_size;
1193 const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
1198 * We don't have a precise counter for the metadata being
1199 * reserved for delalloc, so we'll approximate it by subtracting
1200 * out the block rsv's space from the bytes_may_use. If that
1201 * amount is higher than the individual reserves, then we can
1202 * assume it's tied up in delalloc reservations.
1204 block_rsv_size = global_rsv_size +
1205 btrfs_block_rsv_reserved(delayed_block_rsv) +
1206 btrfs_block_rsv_reserved(delayed_refs_rsv) +
1207 btrfs_block_rsv_reserved(trans_rsv);
1208 if (block_rsv_size < space_info->bytes_may_use)
1209 delalloc_size = space_info->bytes_may_use - block_rsv_size;
1212 * We don't want to include the global_rsv in our calculation,
1213 * because that's space we can't touch. Subtract it from the
1214 * block_rsv_size for the next checks.
1216 block_rsv_size -= global_rsv_size;
1219 * We really want to avoid flushing delalloc too much, as it
1220 * could result in poor allocation patterns, so only flush it if
1221 * it's larger than the rest of the pools combined.
1223 if (delalloc_size > block_rsv_size) {
1224 to_reclaim = delalloc_size;
1225 flush = FLUSH_DELALLOC;
1226 } else if (space_info->bytes_pinned >
1227 (btrfs_block_rsv_reserved(delayed_block_rsv) +
1228 btrfs_block_rsv_reserved(delayed_refs_rsv))) {
1229 to_reclaim = space_info->bytes_pinned;
1230 flush = COMMIT_TRANS;
1231 } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
1232 btrfs_block_rsv_reserved(delayed_refs_rsv)) {
1233 to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
1234 flush = FLUSH_DELAYED_ITEMS_NR;
1236 to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
1237 flush = FLUSH_DELAYED_REFS_NR;
1240 spin_unlock(&space_info->lock);
1243 * We don't want to reclaim everything, just a portion, so scale
1244 * down the to_reclaim by 1/4. If it takes us down to 0,
1245 * reclaim 1 item's worth.
1249 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1250 flush_space(fs_info, space_info, to_reclaim, flush, true);
1252 spin_lock(&space_info->lock);
1255 /* We only went through once, back off our clamping. */
1256 if (loops == 1 && !space_info->reclaim_size)
1257 space_info->clamp = max(1, space_info->clamp - 1);
1258 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1259 spin_unlock(&space_info->lock);
1263 * FLUSH_DELALLOC_WAIT:
1264 * Space is freed from flushing delalloc in one of two ways.
1266 * 1) compression is on and we allocate less space than we reserved
1267 * 2) we are overwriting existing space
1269 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1270 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1271 * length to ->bytes_reserved, and subtracts the reserved space from
1274 * For #2 this is trickier. Once the ordered extent runs we will drop the
1275 * extent in the range we are overwriting, which creates a delayed ref for
1276 * that freed extent. This however is not reclaimed until the transaction
1277 * commits, thus the next stages.
1280 * If we are freeing inodes, we want to make sure all delayed iputs have
1281 * completed, because they could have been on an inode with i_nlink == 0, and
1282 * thus have been truncated and freed up space. But again this space is not
1283 * immediately re-usable, it comes in the form of a delayed ref, which must be
1284 * run and then the transaction must be committed.
1287 * This is where we reclaim all of the pinned space generated by running the
1291 * For data we start with alloc chunk force, however we could have been full
1292 * before, and then the transaction commit could have freed new block groups,
1293 * so if we now have space to allocate do the force chunk allocation.
1295 static const enum btrfs_flush_state data_flush_states[] = {
1296 FLUSH_DELALLOC_FULL,
1302 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1304 struct btrfs_fs_info *fs_info;
1305 struct btrfs_space_info *space_info;
1306 u64 last_tickets_id;
1307 enum btrfs_flush_state flush_state = 0;
1309 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1310 space_info = fs_info->data_sinfo;
1312 spin_lock(&space_info->lock);
1313 if (list_empty(&space_info->tickets)) {
1314 space_info->flush = 0;
1315 spin_unlock(&space_info->lock);
1318 last_tickets_id = space_info->tickets_id;
1319 spin_unlock(&space_info->lock);
1321 while (!space_info->full) {
1322 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1323 spin_lock(&space_info->lock);
1324 if (list_empty(&space_info->tickets)) {
1325 space_info->flush = 0;
1326 spin_unlock(&space_info->lock);
1330 /* Something happened, fail everything and bail. */
1331 if (BTRFS_FS_ERROR(fs_info))
1333 last_tickets_id = space_info->tickets_id;
1334 spin_unlock(&space_info->lock);
1337 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1338 flush_space(fs_info, space_info, U64_MAX,
1339 data_flush_states[flush_state], false);
1340 spin_lock(&space_info->lock);
1341 if (list_empty(&space_info->tickets)) {
1342 space_info->flush = 0;
1343 spin_unlock(&space_info->lock);
1347 if (last_tickets_id == space_info->tickets_id) {
1350 last_tickets_id = space_info->tickets_id;
1354 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1355 if (space_info->full) {
1356 if (maybe_fail_all_tickets(fs_info, space_info))
1359 space_info->flush = 0;
1364 /* Something happened, fail everything and bail. */
1365 if (BTRFS_FS_ERROR(fs_info))
1369 spin_unlock(&space_info->lock);
1374 maybe_fail_all_tickets(fs_info, space_info);
1375 space_info->flush = 0;
1376 spin_unlock(&space_info->lock);
1379 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1381 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1382 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1383 INIT_WORK(&fs_info->preempt_reclaim_work,
1384 btrfs_preempt_reclaim_metadata_space);
1387 static const enum btrfs_flush_state priority_flush_states[] = {
1388 FLUSH_DELAYED_ITEMS_NR,
1389 FLUSH_DELAYED_ITEMS,
1393 static const enum btrfs_flush_state evict_flush_states[] = {
1394 FLUSH_DELAYED_ITEMS_NR,
1395 FLUSH_DELAYED_ITEMS,
1396 FLUSH_DELAYED_REFS_NR,
1399 FLUSH_DELALLOC_WAIT,
1400 FLUSH_DELALLOC_FULL,
1405 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1406 struct btrfs_space_info *space_info,
1407 struct reserve_ticket *ticket,
1408 const enum btrfs_flush_state *states,
1412 int flush_state = 0;
1414 spin_lock(&space_info->lock);
1415 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1417 * This is the priority reclaim path, so to_reclaim could be >0 still
1418 * because we may have only satisfied the priority tickets and still
1419 * left non priority tickets on the list. We would then have
1420 * to_reclaim but ->bytes == 0.
1422 if (ticket->bytes == 0) {
1423 spin_unlock(&space_info->lock);
1427 while (flush_state < states_nr) {
1428 spin_unlock(&space_info->lock);
1429 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1432 spin_lock(&space_info->lock);
1433 if (ticket->bytes == 0) {
1434 spin_unlock(&space_info->lock);
1440 * Attempt to steal from the global rsv if we can, except if the fs was
1441 * turned into error mode due to a transaction abort when flushing space
1442 * above, in that case fail with the abort error instead of returning
1443 * success to the caller if we can steal from the global rsv - this is
1444 * just to have the caller fail immediately instead of later when trying to
1445 * modify the fs, making it easier to debug -ENOSPC problems.
1447 if (BTRFS_FS_ERROR(fs_info)) {
1448 ticket->error = BTRFS_FS_ERROR(fs_info);
1449 remove_ticket(space_info, ticket);
1450 } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1451 ticket->error = -ENOSPC;
1452 remove_ticket(space_info, ticket);
1456 * We must run try_granting_tickets here because we could be a large
1457 * ticket in front of a smaller ticket that can now be satisfied with
1458 * the available space.
1460 btrfs_try_granting_tickets(fs_info, space_info);
1461 spin_unlock(&space_info->lock);
1464 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1465 struct btrfs_space_info *space_info,
1466 struct reserve_ticket *ticket)
1468 spin_lock(&space_info->lock);
1470 /* We could have been granted before we got here. */
1471 if (ticket->bytes == 0) {
1472 spin_unlock(&space_info->lock);
1476 while (!space_info->full) {
1477 spin_unlock(&space_info->lock);
1478 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1479 spin_lock(&space_info->lock);
1480 if (ticket->bytes == 0) {
1481 spin_unlock(&space_info->lock);
1486 ticket->error = -ENOSPC;
1487 remove_ticket(space_info, ticket);
1488 btrfs_try_granting_tickets(fs_info, space_info);
1489 spin_unlock(&space_info->lock);
1492 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1493 struct btrfs_space_info *space_info,
1494 struct reserve_ticket *ticket)
1500 spin_lock(&space_info->lock);
1501 while (ticket->bytes > 0 && ticket->error == 0) {
1502 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1505 * Delete us from the list. After we unlock the space
1506 * info, we don't want the async reclaim job to reserve
1507 * space for this ticket. If that would happen, then the
1508 * ticket's task would not know that space was reserved
1509 * despite getting an error, resulting in a space leak
1510 * (bytes_may_use counter of our space_info).
1512 remove_ticket(space_info, ticket);
1513 ticket->error = -EINTR;
1516 spin_unlock(&space_info->lock);
1520 finish_wait(&ticket->wait, &wait);
1521 spin_lock(&space_info->lock);
1523 spin_unlock(&space_info->lock);
1527 * Do the appropriate flushing and waiting for a ticket.
1529 * @fs_info: the filesystem
1530 * @space_info: space info for the reservation
1531 * @ticket: ticket for the reservation
1532 * @start_ns: timestamp when the reservation started
1533 * @orig_bytes: amount of bytes originally reserved
1534 * @flush: how much we can flush
1536 * This does the work of figuring out how to flush for the ticket, waiting for
1537 * the reservation, and returning the appropriate error if there is one.
1539 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1540 struct btrfs_space_info *space_info,
1541 struct reserve_ticket *ticket,
1542 u64 start_ns, u64 orig_bytes,
1543 enum btrfs_reserve_flush_enum flush)
1548 case BTRFS_RESERVE_FLUSH_DATA:
1549 case BTRFS_RESERVE_FLUSH_ALL:
1550 case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1551 wait_reserve_ticket(fs_info, space_info, ticket);
1553 case BTRFS_RESERVE_FLUSH_LIMIT:
1554 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1555 priority_flush_states,
1556 ARRAY_SIZE(priority_flush_states));
1558 case BTRFS_RESERVE_FLUSH_EVICT:
1559 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1561 ARRAY_SIZE(evict_flush_states));
1563 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1564 priority_reclaim_data_space(fs_info, space_info, ticket);
1571 ret = ticket->error;
1572 ASSERT(list_empty(&ticket->list));
1574 * Check that we can't have an error set if the reservation succeeded,
1575 * as that would confuse tasks and lead them to error out without
1576 * releasing reserved space (if an error happens the expectation is that
1577 * space wasn't reserved at all).
1579 ASSERT(!(ticket->bytes == 0 && ticket->error));
1580 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1581 start_ns, flush, ticket->error);
1586 * This returns true if this flush state will go through the ordinary flushing
1589 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1591 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1592 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1595 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1596 struct btrfs_space_info *space_info)
1598 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1599 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1602 * If we're heavy on ordered operations then clamping won't help us. We
1603 * need to clamp specifically to keep up with dirtying buffered
1604 * writers, because there's not a 1:1 correlation of writing delalloc
1605 * and freeing space, like there is with flushing delayed refs or
1606 * delayed nodes. If we're already more ordered than delalloc then
1607 * we're keeping up, otherwise we aren't and should probably clamp.
1609 if (ordered < delalloc)
1610 space_info->clamp = min(space_info->clamp + 1, 8);
1613 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1615 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1616 flush == BTRFS_RESERVE_FLUSH_EVICT);
1620 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to
1621 * fail as quickly as possible.
1623 static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
1625 return (flush != BTRFS_RESERVE_NO_FLUSH &&
1626 flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
1630 * Try to reserve bytes from the block_rsv's space.
1632 * @fs_info: the filesystem
1633 * @space_info: space info we want to allocate from
1634 * @orig_bytes: number of bytes we want
1635 * @flush: whether or not we can flush to make our reservation
1637 * This will reserve orig_bytes number of bytes from the space info associated
1638 * with the block_rsv. If there is not enough space it will make an attempt to
1639 * flush out space to make room. It will do this by flushing delalloc if
1640 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to
1641 * regain reservations will be made and this will fail if there is not enough
1644 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1645 struct btrfs_space_info *space_info, u64 orig_bytes,
1646 enum btrfs_reserve_flush_enum flush)
1648 struct work_struct *async_work;
1649 struct reserve_ticket ticket;
1653 bool pending_tickets;
1657 * If we have a transaction handle (current->journal_info != NULL), then
1658 * the flush method must not be BTRFS_RESERVE_FLUSH_ALL* or
1659 * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those
1660 * flushing methods can trigger transaction commits.
1662 if (current->journal_info) {
1663 /* One assert per line for easier debugging. */
1664 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL);
1665 ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL);
1666 ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT);
1669 if (flush == BTRFS_RESERVE_FLUSH_DATA)
1670 async_work = &fs_info->async_data_reclaim_work;
1672 async_work = &fs_info->async_reclaim_work;
1674 spin_lock(&space_info->lock);
1675 used = btrfs_space_info_used(space_info, true);
1678 * We don't want NO_FLUSH allocations to jump everybody, they can
1679 * generally handle ENOSPC in a different way, so treat them the same as
1680 * normal flushers when it comes to skipping pending tickets.
1682 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1683 pending_tickets = !list_empty(&space_info->tickets) ||
1684 !list_empty(&space_info->priority_tickets);
1686 pending_tickets = !list_empty(&space_info->priority_tickets);
1689 * Carry on if we have enough space (short-circuit) OR call
1690 * can_overcommit() to ensure we can overcommit to continue.
1692 if (!pending_tickets &&
1693 ((used + orig_bytes <= space_info->total_bytes) ||
1694 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1695 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1701 * Things are dire, we need to make a reservation so we don't abort. We
1702 * will let this reservation go through as long as we have actual space
1703 * left to allocate for the block.
1705 if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
1706 used = btrfs_space_info_used(space_info, false);
1707 if (used + orig_bytes <= space_info->total_bytes) {
1708 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1715 * If we couldn't make a reservation then setup our reservation ticket
1716 * and kick the async worker if it's not already running.
1718 * If we are a priority flusher then we just need to add our ticket to
1719 * the list and we will do our own flushing further down.
1721 if (ret && can_ticket(flush)) {
1722 ticket.bytes = orig_bytes;
1724 space_info->reclaim_size += ticket.bytes;
1725 init_waitqueue_head(&ticket.wait);
1726 ticket.steal = can_steal(flush);
1727 if (trace_btrfs_reserve_ticket_enabled())
1728 start_ns = ktime_get_ns();
1730 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1731 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1732 flush == BTRFS_RESERVE_FLUSH_DATA) {
1733 list_add_tail(&ticket.list, &space_info->tickets);
1734 if (!space_info->flush) {
1736 * We were forced to add a reserve ticket, so
1737 * our preemptive flushing is unable to keep
1738 * up. Clamp down on the threshold for the
1739 * preemptive flushing in order to keep up with
1742 maybe_clamp_preempt(fs_info, space_info);
1744 space_info->flush = 1;
1745 trace_btrfs_trigger_flush(fs_info,
1749 queue_work(system_unbound_wq, async_work);
1752 list_add_tail(&ticket.list,
1753 &space_info->priority_tickets);
1755 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1757 * We will do the space reservation dance during log replay,
1758 * which means we won't have fs_info->fs_root set, so don't do
1759 * the async reclaim as we will panic.
1761 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1762 !work_busy(&fs_info->preempt_reclaim_work) &&
1763 need_preemptive_reclaim(fs_info, space_info)) {
1764 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1765 orig_bytes, flush, "preempt");
1766 queue_work(system_unbound_wq,
1767 &fs_info->preempt_reclaim_work);
1770 spin_unlock(&space_info->lock);
1771 if (!ret || !can_ticket(flush))
1774 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1779 * Try to reserve metadata bytes from the block_rsv's space.
1781 * @fs_info: the filesystem
1782 * @space_info: the space_info we're allocating for
1783 * @orig_bytes: number of bytes we want
1784 * @flush: whether or not we can flush to make our reservation
1786 * This will reserve orig_bytes number of bytes from the space info associated
1787 * with the block_rsv. If there is not enough space it will make an attempt to
1788 * flush out space to make room. It will do this by flushing delalloc if
1789 * possible or committing the transaction. If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to
1790 * regain reservations will be made and this will fail if there is not enough
1793 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1794 struct btrfs_space_info *space_info,
1796 enum btrfs_reserve_flush_enum flush)
1800 ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
1801 if (ret == -ENOSPC) {
1802 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1803 space_info->flags, orig_bytes, 1);
1805 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1806 btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
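/*
 * A minimal usage sketch, with a hypothetical helper that is not referenced
 * anywhere else: reserve the worst case metadata space for inserting a single
 * item, letting the full flushing state machine run. Must not be called with
 * a transaction handle held (see the asserts in __reserve_bytes()).
 */
static inline int example_reserve_one_item(struct btrfs_fs_info *fs_info,
					   struct btrfs_space_info *space_info)
{
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/* Returns 0 on success, -ENOSPC (or -EINTR) on failure. */
	return btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes,
					    BTRFS_RESERVE_FLUSH_ALL);
}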
1812 * Try to reserve data bytes for an allocation.
1814 * @fs_info: the filesystem
1815 * @bytes: number of bytes we need
1816 * @flush: how we are allowed to flush
1818 * This will reserve bytes from the data space info. If there is not enough
1819 * space then we will attempt to flush space as specified by flush.
1821 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1822 enum btrfs_reserve_flush_enum flush)
1824 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1827 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1828 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
1829 flush == BTRFS_RESERVE_NO_FLUSH);
1830 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1832 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1833 if (ret == -ENOSPC) {
1834 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1835 data_sinfo->flags, bytes, 1);
1836 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1837 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
1842 /* Dump all the space infos when we abort a transaction due to ENOSPC. */
1843 __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
1845 struct btrfs_space_info *space_info;
1847 btrfs_info(fs_info, "dumping space info:");
1848 list_for_each_entry(space_info, &fs_info->space_info, list) {
1849 spin_lock(&space_info->lock);
1850 __btrfs_dump_space_info(fs_info, space_info);
1851 spin_unlock(&space_info->lock);
1853 dump_global_block_rsv(fs_info);
1857 * Account the unused space of all the readonly block groups in the space_info.
1858 * Takes mirrors into account.
1860 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
1862 struct btrfs_block_group *block_group;
1866 /* It's df, we don't care if it's racy */
1867 if (list_empty(&sinfo->ro_bgs))
1870 spin_lock(&sinfo->lock);
1871 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
1872 spin_lock(&block_group->lock);
1874 if (!block_group->ro) {
1875 spin_unlock(&block_group->lock);
1879 factor = btrfs_bg_type_to_factor(block_group->flags);
1880 free_bytes += (block_group->length -
1881 block_group->used) * factor;
1883 spin_unlock(&block_group->lock);
1885 spin_unlock(&sinfo->lock);