// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   -> handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if so we got our reservation and we can carry
 *     on; if not, return the appropriate error (ENOSPC, but it can be EINTR if
 *     we were interrupted).
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space (an illustrative sketch of
 *   this check follows this comment).
 */

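/*
 * Illustrative sketch (not compiled, using helpers defined in this file):
 * with space_info->lock held, the normal-case reservation check described
 * above reduces to roughly the following:
 *
 *	u64 used = btrfs_space_info_used(space_info, true);
 *
 *	if (used + num_bytes <= space_info->total_bytes ||
 *	    btrfs_can_overcommit(fs_info, space_info, num_bytes, flush)) {
 *		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
 *						      num_bytes);
 *		// success: the reservation now lives in bytes_may_use
 *	}
 *
 * Otherwise a reserve_ticket is queued and the flushing machinery below
 * takes over.
 */
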
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (in percent) of unusable space will
 * be scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH	(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

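/*
 * Worked example for the non-zoned cases above (illustrative numbers): on a
 * filesystem with 100GiB of writable device space, metadata chunks are cut
 * 1GiB at a time (total_rw_bytes > 50GiB), data chunks use
 * BTRFS_MAX_DATA_CHUNK_SIZE (10GiB) and system chunks stay at 32MiB, while
 * a 20GiB filesystem drops the metadata chunk size to 256MiB.  On zoned
 * devices every chunk is exactly one zone.
 */
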
/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group)
{
	struct btrfs_space_info *found;
	int factor, index;

	factor = btrfs_bg_type_to_factor(block_group->flags);

	found = btrfs_find_space_info(info, block_group->flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += block_group->length;
	found->disk_total += block_group->length * factor;
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	found->bytes_zone_unusable += block_group->zone_unusable;
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);

	block_group->space_info = found;

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	down_write(&found->groups_sem);
	list_add_tail(&block_group->list, &found->block_groups[index]);
	up_write(&found->groups_sem);
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

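/*
 * Illustrative arithmetic for the above (made-up numbers): with 8GiB of
 * unallocated device space and a RAID1 metadata profile (factor 2), avail
 * starts at 4GiB.  A BTRFS_RESERVE_FLUSH_ALL reservation may then overcommit
 * by up to 512MiB (4GiB >> 3), while a lighter flush level may overcommit by
 * up to 2GiB (4GiB >> 1).
 */
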
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags) &&
	    (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
		avail = 0;
	else
		avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info)
{
	switch (space_info->flags) {
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "SYSTEM";
	case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA:
		return "DATA+METADATA";
	case BTRFS_BLOCK_GROUP_DATA:
		return "DATA";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "METADATA";
	default:
		return "UNKNOWN";
	}
}

static void dump_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	const char *flag_str = space_info_flag_to_str(info);

	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %s has %lld free, is %sfull",
		   flag_str,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		   info->total_bytes, info->bytes_used, info->bytes_pinned,
		   info->bytes_reserved, info->bytes_may_use,
		   info->bytes_readonly, info->bytes_zone_unusable);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	dump_global_block_rsv(fs_info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
	"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			   cache->start, cache->length, cache->used, cache->pinned,
			   cache->reserved, cache->zone_unusable,
			   cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

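/*
 * Rough arithmetic (illustrative, assuming the usual definition of
 * btrfs_calc_insert_metadata_size() as nodesize * BTRFS_MAX_LEVEL * 2 per
 * item): with a 16KiB nodesize one item costs 256KiB, so a request to
 * reclaim 1MiB converts to nr = 4 items, and anything smaller than one
 * item's worth is rounded up to 1.
 */
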
#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc.
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calc the number of pages we need to flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  What we really want to do is reclaim a full
		 * inode's worth of reservations, however that's not available
		 * to us here.  We will take a fraction of the delalloc bytes
		 * for our flushing loops and hope for the best.  Delalloc will
		 * expand the amount we write to cover an entire dirty extent,
		 * which will reclaim the metadata reservation for that range.
		 * If it's not enough subsequent flush stages will be more
		 * aggressive.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}

	trans = current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode is
		 * marked clean.  We don't use filemap_fdatawrite() for flushing
		 * because we want to control how many pages we write out at a
		 * time, thus this is the only safe way to make sure we've
		 * waited for outstanding compressed workers to have started
		 * their jobs and thus have ordered extents set up properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want to
		 * start the IO on everybody, and then come back here and wait
		 * for all of the async work to catch up.  Once we're done with
		 * that we know we'll have ordered extents for everything and we
		 * can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async case,
		 * as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote fewer pages in
		 * this loop than we have outstanding, only wait for that number
		 * of pages, otherwise we can wait for all async pages to finish
		 * before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state.  This is only advisory
 * and may fail for various reasons.  The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		/*
		 * For metadata space on a zoned filesystem, reaching here means
		 * we don't have enough space left in active_total_bytes.  Try
		 * to activate a block group first, because we may have inactive
		 * block groups already allocated.
		 */
		ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
		if (ret < 0)
			break;
		else if (ret == 1)
			break;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		/*
		 * For metadata space on a zoned filesystem, allocating a new
		 * chunk is not enough.  We still need to activate the block
		 * group.  Activate the newly allocated block group by (maybe)
		 * finishing a block group.
		 */
		if (ret == 1) {
			ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
			/*
			 * Revert to the original ret regardless of whether we
			 * could finish one block group or not.
			 */
			if (ret >= 0)
				ret = 1;
		}

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 thresh;
	u64 used;

	thresh = mult_perc(space_info->total_bytes, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 * Amount of free space  Minimum threshold       Maximum threshold
	 *
	 * 256GiB                1GiB                    128GiB
	 * 128GiB                512MiB                  64GiB
	 * 64GiB                 256MiB                  32GiB
	 * 32GiB                 128MiB                  16GiB
	 * 16GiB                 64MiB                   8GiB
	 *
	 * These are the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

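/*
 * Illustrative threshold check for the above (made-up numbers): with 64GiB
 * of effective free space and clamp == 3, preemptive reclaim kicks in once
 * the reclaimable bytes computed above exceed 64GiB >> 3 = 8GiB.
 */
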
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = mult_perc(global_rsv->size, 10);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create an
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim 1 item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately reusable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;
		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non-priority tickets on the list.  We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/* Attempt to steal from the global rsv if we can. */
	if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/* We could have been granted before we got here. */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list.  After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket.  If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/*
 * Do the appropriate flushing and waiting for a ticket.
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

666daa9f
JB
1535/*
1536 * This returns true if this flush state will go through the ordinary flushing
1537 * code.
1538 */
1539static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1540{
1541 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1542 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1543}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us. We
	 * need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes. If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}
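
/*
 * Illustrative arithmetic for the clamping above (the numbers are made up,
 * not taken from this file): with ordered = 256MiB and delalloc = 1GiB,
 * writeback is not keeping up with dirtying, so space_info->clamp is bumped,
 * e.g. from 2 to 3, capped at 8. need_preemptive_reclaim() then shifts its
 * threshold right by the clamp value, so a larger clamp makes the preemptive
 * flusher kick in earlier under this kind of load.
 */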

static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}

/*
 * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want
 * to fail as quickly as possible.
 */
static inline bool can_ticket(enum btrfs_reserve_flush_enum flush)
{
	return (flush != BTRFS_RESERVE_NO_FLUSH &&
		flush != BTRFS_RESERVE_FLUSH_EMERGENCY);
}

/*
 * Try to reserve bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempt to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * Things are dire, we need to make a reservation so we don't abort. We
	 * will let this reservation go through as long as we have actual space
	 * left to allocate for the block.
	 */
	if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) {
		used = btrfs_space_info_used(space_info, false);
		if (used + orig_bytes <= space_info->total_bytes) {
			btrfs_space_info_update_bytes_may_use(fs_info, space_info,
							      orig_bytes);
			ret = 0;
		}
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && can_ticket(flush)) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up. Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || !can_ticket(flush))
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/*
 * Try to reserve metadata bytes from the block_rsv's space.
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempt to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

/*
 * Try to reserve data bytes for an allocation.
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info. If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE ||
	       flush == BTRFS_RESERVE_NO_FLUSH);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
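
/*
 * Usage sketch, simplified from btrfs_check_data_free_space() in
 * delalloc-space.c (start and count are caller-supplied values, shown for
 * illustration only): reserve sector-aligned data space for a buffered write:
 *
 *	u64 len = round_up(start + count, fs_info->sectorsize) -
 *		  round_down(start, fs_info->sectorsize);
 *
 *	ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
 */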

/* Dump all the space infos when we abort a transaction due to ENOSPC. */
__cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	btrfs_info(fs_info, "dumping space info:");
	list_for_each_entry(space_info, &fs_info->space_info, list) {
		spin_lock(&space_info->lock);
		__btrfs_dump_space_info(fs_info, space_info);
		spin_unlock(&space_info->lock);
	}
	dump_global_block_rsv(fs_info);
}

/*
 * Account the unused space of all the readonly block groups in the
 * space_info. Takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		factor = btrfs_bg_type_to_factor(block_group->flags);
		free_bytes += (block_group->length -
			       block_group->used) * factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}
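
/*
 * Example of the factor math above (illustrative numbers): a read-only RAID1
 * block group with length = 1GiB and used = 256MiB has a factor of 2, so it
 * contributes (1GiB - 256MiB) * 2 = 1.5GiB to the returned total, i.e. the
 * unused bytes counted across both mirrors.
 */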