// SPDX-License-Identifier: GPL-2.0

#include "space-info.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info. This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field. Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation. There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
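 *
 *   For example (a sketch of the check, not the exact code; see
 *   btrfs_space_info_used() and __reserve_metadata_bytes() below):
 *
 *     used = bytes_used + bytes_reserved + bytes_pinned + bytes_readonly +
 *            bytes_may_use;
 *     if (used + num_bytes <= total_bytes)
 *       the reservation fits;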
 *
 *   2) block_rsv's. These are basically buckets for every different type of
 *   metadata reservation we have. You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size. These are the worst case calculations we use based
 *   on the number of items we will want to modify. We have one for changing
 *   items, and one for inserting new items. Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
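 *
 *   As a sketch (these helpers live in ctree.h; the math below is an
 *   assumption based on a worst case COW of a full-height path, i.e.
 *   BTRFS_MAX_LEVEL nodes of nodesize bytes each, doubled for inserts
 *   because nodes can split):
 *
 *     btrfs_calc_metadata_size(fs_info, n)        ~ nodesize * BTRFS_MAX_LEVEL * n
 *     btrfs_calc_insert_metadata_size(fs_info, n) ~ nodesize * BTRFS_MAX_LEVEL * 2 * n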
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_reserve += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_reserve -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_metadata_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   -> handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes. This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed. If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can
 *     carry on, if not return the appropriate error (ENOSPC, but can be EINTR
 *     if we were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs. Each of these delayed
 *   things, however, holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
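 *
 *   For reference, the states are walked in this order (a sketch of enum
 *   btrfs_flush_state; treat the exact list as an assumption for this
 *   version of the code):
 *
 *     FLUSH_DELAYED_ITEMS_NR -> FLUSH_DELAYED_ITEMS ->
 *     FLUSH_DELALLOC -> FLUSH_DELALLOC_WAIT ->
 *     FLUSH_DELAYED_REFS_NR -> FLUSH_DELAYED_REFS ->
 *     ALLOC_CHUNK -> ALLOC_CHUNK_FORCE -> RUN_DELAYED_IPUTS -> COMMIT_TRANS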
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode. Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes. We keep these delayed items to coalesce these operations
 *     into a single operation done on demand. These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is
 *     reserved for delayed allocation. We can reclaim some of this space
 *     simply by running delalloc, but usually we need to wait for ordered
 *     extents to reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and
 *     every delayed ref operation holds a reservation. Running these is a
 *     quick way to reclaim space, but we want to hold this until the end
 *     because COW can churn a lot and we can avoid making some extent tree
 *     modifications if we are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items. Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit
 *     the transaction or not. In order to avoid constantly churning we do all
 *     the above flushing first and then commit the transaction as the last
 *     resort. However we need to take into account things like pinned space
 *     that would be freed, plus any delayed work we may not have gotten rid
 *     of in the case of metadata.
 *
 * OVERCOMMITTING
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space. This only happens with metadata; data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space. If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the
 *   right thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}
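
/*
 * Example (a sketch): the "free" space the reservation code reasons about is
 *
 *	free = space_info->total_bytes - btrfs_space_info_used(space_info, true);
 *
 * callers that account bytes_may_use separately pass may_use_included=false.
 */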

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	u64 factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	return avail;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
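
/*
 * Example (a sketch): callers typically pair this with the plain free space
 * check, e.g.
 *
 *	if (used + bytes <= space_info->total_bytes ||
 *	    btrfs_can_overcommit(fs_info, space_info, bytes, flush))
 *		the reservation can be made;
 *
 * which is the pattern used by btrfs_try_granting_tickets() and
 * __reserve_metadata_bytes() below.
 */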

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}
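
/*
 * Note: space_info->reclaim_size is the running sum of ->bytes over all
 * queued tickets (added in __reserve_metadata_bytes(), subtracted here), and
 * it is the starting point for btrfs_calc_reclaim_metadata_size() below.
 */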

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);
	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, u64 nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem should guarantee that the delalloc inodes
		 * list is empty once the filesystem is read-only (all dirty
		 * pages have been written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0,
						 (u64)-1);
	}
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}
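
/*
 * Worked example (assuming the btrfs_calc_insert_metadata_size() math of
 * nodesize * BTRFS_MAX_LEVEL * 2 per item): with a 16K nodesize one item
 * costs 16K * 8 * 2 = 256K, so reclaiming 1M of metadata maps to 4 items.
 */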

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops = 0;

	/* Calc the number of the pages we need flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly, so increase the amount to reclaim by 2x in order to
		 * make sure we're flushing enough delalloc to hopefully reclaim
		 * some metadata reservations.
		 */
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
		to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	}

	trans = (struct btrfs_trans_handle *)current->journal_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue. I.e. if there are more async pages than
		 * we require wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}

/*
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @fs_info - the fs_info for our filesystem
 * @space_info - the space_info we're allocating for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, space_info, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_metadata_alloc_profile(fs_info),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space. If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);
	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
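
/*
 * Example (a sketch): with a 10GiB metadata space_info the threshold is
 * div_factor_fine(10GiB, 98), roughly 9.8GiB. Background reclaim only kicks
 * in once @used crosses that, and is skipped when used + reserved is already
 * over it (we're plain full) or when nothing is reclaimable.
 */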

static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding. However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction. If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};
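
/*
 * Per the "MAKING RESERVATIONS, FLUSHING HIGH PRIORITY" comment at the top of
 * this file, these lists only contain states that are safe for a priority
 * flusher to call directly without deadlocking.
 */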

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim,
			    states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/*
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs_info for this fs
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list. Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * states.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

/*
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @fs_info - the filesystem we're allocating for
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}

/*
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's
 *				  space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
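
/*
 * Usage sketch (a hypothetical caller, not taken from this file): reserving
 * worst case space for inserting one item, flushing if needed.
 *
 *	u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret = btrfs_reserve_metadata_bytes(root, block_rsv, bytes,
 *					       BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;	(commonly -ENOSPC)
 */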