// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"
#include "fs.h"

/*
 * This contains the logic to handle async discard.
 *
 * Async discard manages trimming of free space outside of transaction commit.
 * Discarding is done by managing the block_groups on an LRU list based on free
 * space recency. Two passes are used: extents are discarded first, and bitmaps
 * second, which gives bitmap regions the best opportunity to coalesce.
 * The block_groups are maintained on multiple lists to allow for multiple
 * passes with different discard filter requirements. A delayed work item is
 * used to manage discarding with a timeout determined by a max of the delay
 * incurred by the iops rate limit, the byte rate limit, and the max delay of
 * BTRFS_DISCARD_MAX_DELAY.
 *
 * Note, this only keeps track of block_groups that are explicitly for data.
 * Mixed block_groups are not supported.
 *
 * The first list is special to manage discarding of fully free block groups.
 * This is necessary because we issue a final trim for a full free block group
 * after forgetting it. When a block group becomes unused, instead of directly
 * being added to the unused_bgs list, we add it to this first list. Then
 * from there, if it becomes fully discarded, we place it onto the unused_bgs
 * list.
 *
 * The in-memory free space cache serves as the backing state for discard.
 * Consequently this means there is no persistence. We opt to load all the
 * block groups in as not discarded, so the mount case degenerates to the
 * crashing case.
 *
 * As the free space cache uses bitmaps, there exists a tradeoff between
 * ease/efficiency for find_free_extent() and the accuracy of discard state.
 * Here we opt to let untrimmed regions merge with everything while only letting
 * trimmed regions merge with other trimmed regions. This can cause
 * overtrimming, but the coalescing benefit seems to be worth it. Additionally,
 * bitmap state is tracked as a whole. If we're able to fully trim a bitmap,
 * the trimmed flag is set on the bitmap. Otherwise, if an allocation comes in,
 * this resets the state and we will retry trimming the whole bitmap. This is a
 * tradeoff between discard state accuracy and the cost of accounting.
 */
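
/*
 * Illustrative walk-through (a sketch of the flow implemented below, not part
 * of the original design text): a data block group that frees space is queued
 * on the BTRFS_DISCARD_INDEX_START list. Once its discard_eligible_time
 * passes, the delayed work item trims a region of its free extents per
 * invocation, then switches to a bitmap pass for the same block group. A
 * block group whose used count drops to zero is instead queued on the
 * BTRFS_DISCARD_INDEX_UNUSED list, and only after it is fully trimmed is it
 * handed to the unused_bgs path.
 */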

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)

#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(1000U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};
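
/*
 * Reading aid (illustrative only, the filter constants are defined with the
 * free space cache code): index 0 is BTRFS_DISCARD_INDEX_UNUSED and carries no
 * minimum length, so fully unused block groups are trimmed completely. The
 * later indices are the regular filter lists, each accepting smaller free
 * space regions than the one before it (BTRFS_ASYNC_DISCARD_MAX_FILTER, then
 * BTRFS_ASYNC_DISCARD_MIN_FILTER).
 */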

static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}

static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				  struct btrfs_block_group *block_group)
{
	lockdep_assert_held(&discard_ctl->lock);
	if (!btrfs_run_discard_work(discard_ctl))
		return;

	if (list_empty(&block_group->discard_list) ||
	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED)
			block_group->discard_index = BTRFS_DISCARD_INDEX_START;
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);
		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	}
	if (list_empty(&block_group->discard_list))
		btrfs_get_block_group(block_group);

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));
}

static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	if (!btrfs_is_block_group_data_only(block_group))
		return;

	spin_lock(&discard_ctl->lock);
	__add_to_discard_list(discard_ctl, block_group);
	spin_unlock(&discard_ctl->lock);
}

static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	bool queued;

	spin_lock(&discard_ctl->lock);

	queued = !list_empty(&block_group->discard_list);

	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	list_del_init(&block_group->discard_list);

	block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
	block_group->discard_eligible_time = (ktime_get_ns() +
					      BTRFS_DISCARD_UNUSED_DELAY);
	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	if (!queued)
		btrfs_get_block_group(block_group);
	list_add_tail(&block_group->discard_list,
		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);

	spin_unlock(&discard_ctl->lock);
}

static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;
	bool queued = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	queued = !list_empty(&block_group->discard_list);
	list_del_init(&block_group->discard_list);
	/*
	 * If the block group is currently running in the discard workfn, we
	 * don't want to deref it, since it's still being used by the workfn.
	 * The workfn will notice this case and deref the block group when it
	 * is finished.
	 */
	if (queued && !running)
		btrfs_put_block_group(block_group);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/*
 * Find block_group that's up next for discarding.
 *
 * @discard_ctl: discard control
 * @now:         current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding, checking the discard_eligible_time of each block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					struct btrfs_discard_ctl *discard_ctl,
					u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}

/*
 * Look up next block group and set it for use.
 *
 * @discard_ctl:   discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 * @now:           time when discard was invoked, in ns
 *
 * Wrap find_next_block_group() and set the block_group to be in use.
 * @discard_state's control flow is managed here. Variables related to
 * @discard_state are reset here as needed (e.g. @discard_cursor).
 * @discard_state and @discard_index are remembered as they may change while
 * we're discarding, but we want the discard to execute in the context
 * determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					struct btrfs_discard_ctl *discard_ctl,
					enum btrfs_discard_state *discard_state,
					int *discard_index, u64 now)
{
	struct btrfs_block_group *block_group;

	spin_lock(&discard_ctl->lock);
again:
	block_group = find_next_block_group(discard_ctl, now);

	if (block_group && now >= block_group->discard_eligible_time) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
		    block_group->used != 0) {
			if (btrfs_is_block_group_data_only(block_group)) {
				__add_to_discard_list(discard_ctl, block_group);
			} else {
				list_del_init(&block_group->discard_list);
				btrfs_put_block_group(block_group);
			}
			goto again;
		}
		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
			block_group->discard_cursor = block_group->start;
			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
		}
		discard_ctl->block_group = block_group;
	}
	if (block_group) {
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	}
	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/*
 * Update a block group's filters.
 *
 * @block_group: block group of interest
 * @bytes:       recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size. Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/*
 * Move a block group along the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index. If it falls off the list, let it be.
 * Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}

/*
 * Remove a block_group from the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Remove @block_group from the discard lists. If necessary, wait on the
 * current work and then reschedule the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/*
 * Handles queuing the block_groups.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Maintain the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	if (block_group->used == 0)
		add_to_discard_unused_list(discard_ctl, block_group);
	else
		add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}

static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
					  u64 now, bool override)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_run_discard_work(discard_ctl))
		return;
	if (!override && delayed_work_pending(&discard_ctl->work))
		return;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC;
		u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);

		/*
		 * A single delayed workqueue item is responsible for
		 * discarding, so we can manage the bytes rate limit by keeping
		 * track of the previous discard.
		 */
		if (kbps_limit && discard_ctl->prev_discard) {
			u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
			u64 bps_delay = div64_u64(discard_ctl->prev_discard *
						  NSEC_PER_SEC, bps_limit);

			delay = max(delay, bps_delay);
		}

		/*
		 * This timeout is to hopefully prevent immediate discarding
		 * in a recently allocated block group.
		 */
		if (now < block_group->discard_eligible_time) {
			u64 bg_timeout = block_group->discard_eligible_time - now;

			delay = max(delay, bg_timeout);
		}

		if (override && discard_ctl->prev_discard) {
			u64 elapsed = now - discard_ctl->prev_discard_time;

			if (delay > elapsed)
				delay -= elapsed;
			else
				delay = 0;
		}

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, nsecs_to_jiffies(delay));
	}
}
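
/*
 * Worked example for the delay calculation above (hypothetical numbers, not
 * defaults): with delay_ms = 100 and kbps_limit = 102400 (100 MiB/s), a
 * previous discard of 64 MiB gives bps_delay = 64 MiB * NSEC_PER_SEC /
 * (100 MiB/s) ~= 640ms, so the work item is scheduled max(100ms, 640ms) =
 * 640ms out, extended further if the block group's discard_eligible_time
 * lies even later in the future.
 */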

/*
 * Responsible for scheduling the discard work.
 *
 * @discard_ctl: discard control
 * @override:    override the current timer
 *
 * Discards are issued by a delayed workqueue item. @override is used to
 * update the current delay as the baseline delay interval is reevaluated on
 * transaction commit. This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);
	__btrfs_discard_schedule_work(discard_ctl, now, override);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine next step of a block_group.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Determine the next step for a block group after it's finished going through
 * a pass on a discard list. If it is unused and fully trimmed, we can mark it
 * unused and send it to the unused_bgs path. Otherwise, pass it onto the
 * appropriate filter list or let it fall off.
 */
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
				      struct btrfs_block_group *block_group)
{
	remove_from_discard_list(discard_ctl, block_group);

	if (block_group->used == 0) {
		if (btrfs_is_free_space_trimmed(block_group))
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

/*
 * Discard work queue callback
 *
 * @work: work
 *
 * Find the next block_group to start discarding and then discard a single
 * region. It does this in a two-pass fashion: first extents and second
 * bitmaps. Completely discarded block groups are sent to the unused_bgs path.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;
	u64 now = ktime_get_ns();

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index, now);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;
	if (now < block_group->discard_eligible_time) {
		btrfs_discard_schedule_work(discard_ctl, false);
		return;
	}

	/* Perform discarding */
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter. In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       minlen, maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       minlen, true);
		discard_ctl->discard_extent_bytes += trimmed;
	}

	/* Determine next steps for a block_group */
	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
		if (discard_state == BTRFS_DISCARD_BITMAPS) {
			btrfs_finish_discard_pass(discard_ctl, block_group);
		} else {
			block_group->discard_cursor = block_group->start;
			spin_lock(&discard_ctl->lock);
			if (block_group->discard_state !=
			    BTRFS_DISCARD_RESET_CURSOR)
				block_group->discard_state =
							BTRFS_DISCARD_BITMAPS;
			spin_unlock(&discard_ctl->lock);
		}
	}

	now = ktime_get_ns();
	spin_lock(&discard_ctl->lock);
	discard_ctl->prev_discard = trimmed;
	discard_ctl->prev_discard_time = now;
	/*
	 * If the block group was removed from the discard list while it was
	 * running in this workfn, then we didn't deref it, since this function
	 * still owned that reference. But we set the discard_ctl->block_group
	 * back to NULL, so we can use that condition to know that now we need
	 * to deref the block_group.
	 */
	if (discard_ctl->block_group == NULL)
		btrfs_put_block_group(block_group);
	discard_ctl->block_group = NULL;
	__btrfs_discard_schedule_work(discard_ctl, now, false);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine if async discard should be running.
 *
 * @discard_ctl: discard control
 *
 * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

/*
 * Recalculate the base delay.
 *
 * @discard_ctl: discard control
 *
 * Recalculate the base delay which is based off the total number of
 * discardable_extents. Clamp this between the lower_limit (iops_limit or 1ms)
 * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
 */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
{
	s32 discardable_extents;
	s64 discardable_bytes;
	u32 iops_limit;
	unsigned long min_delay = BTRFS_DISCARD_MIN_DELAY_MSEC;
	unsigned long delay;

	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
	if (!discardable_extents)
		return;

	spin_lock(&discard_ctl->lock);

	/*
	 * The following is to fix a potential -1 discrepancy that we're not
	 * sure how to reproduce. But given that this is the only place that
	 * utilizes these numbers and this is only called from
	 * btrfs_finish_extent_commit() which is synchronized, we can correct
	 * here.
	 */
	if (discardable_extents < 0)
		atomic_add(-discardable_extents,
			   &discard_ctl->discardable_extents);

	discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes);
	if (discardable_bytes < 0)
		atomic64_add(-discardable_bytes,
			     &discard_ctl->discardable_bytes);

	if (discardable_extents <= 0) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	iops_limit = READ_ONCE(discard_ctl->iops_limit);

	if (iops_limit) {
		delay = MSEC_PER_SEC / iops_limit;
	} else {
		/*
		 * Unset iops_limit means go as fast as possible, so allow a
		 * delay of 0.
		 */
		delay = 0;
		min_delay = 0;
	}

	delay = clamp(delay, min_delay, BTRFS_DISCARD_MAX_DELAY_MSEC);
	discard_ctl->delay_ms = delay;

	spin_unlock(&discard_ctl->lock);
}
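
/*
 * Worked example for the base delay (hypothetical iops value): iops_limit =
 * 10 gives delay = MSEC_PER_SEC / 10 = 100ms, already within the
 * [1ms, 1000ms] clamp. The default iops_limit of BTRFS_DISCARD_MAX_IOPS
 * (1000) yields the 1ms floor, while an iops_limit of 0 drops both delay and
 * min_delay to 0, so the clamp resolves to 0 and pacing is left to the kbps
 * limit and the block group eligibility time.
 */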

/*
 * Propagate discard counters.
 *
 * @block_group: block_group of interest
 *
 * Propagate deltas of counters up to the discard_ctl. It maintains a current
 * counter and a previous counter passing the delta up to the global stat.
 * Then the current counter value becomes the previous counter value.
 */
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl;
	struct btrfs_discard_ctl *discard_ctl;
	s32 extents_delta;
	s64 bytes_delta;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
	    !btrfs_is_block_group_data_only(block_group))
		return;

	ctl = block_group->free_space_ctl;
	discard_ctl = &block_group->fs_info->discard_ctl;

	lockdep_assert_held(&ctl->tree_lock);
	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
			ctl->discardable_extents[BTRFS_STAT_PREV];
	if (extents_delta) {
		atomic_add(extents_delta, &discard_ctl->discardable_extents);
		ctl->discardable_extents[BTRFS_STAT_PREV] =
			ctl->discardable_extents[BTRFS_STAT_CURR];
	}

	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
		      ctl->discardable_bytes[BTRFS_STAT_PREV];
	if (bytes_delta) {
		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
		ctl->discardable_bytes[BTRFS_STAT_PREV] =
			ctl->discardable_bytes[BTRFS_STAT_CURR];
	}
}
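
/*
 * Example of the delta bookkeeping above (hypothetical counts): if a block
 * group's discardable_extents[BTRFS_STAT_CURR] is 10 while
 * discardable_extents[BTRFS_STAT_PREV] is 7, a delta of 3 is added to the
 * fs-wide discardable_extents counter and PREV advances to 10, so the next
 * call only propagates changes made since this one.
 */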

/*
 * Punt unused_bgs list to discard lists.
 *
 * @fs_info: fs_info of interest
 *
 * The unused_bgs list needs to be punted to the discard lists because the
 * order of operations is changed. In the normal synchronous discard path, the
 * block groups are trimmed via a single large trim in transaction commit. This
 * is ultimately what we are trying to avoid with asynchronous discard. Thus,
 * it must be done before going down the unused_bgs path.
 */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group, *next;

	spin_lock(&fs_info->unused_bgs_lock);
	/* We enabled async discard, so punt all to the queue */
	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
				 bg_list) {
		list_del_init(&block_group->bg_list);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
		/*
		 * This put is for the get done by btrfs_mark_bg_unused.
		 * Queueing discard incremented it for discard's reference.
		 */
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * Purge discard lists.
 *
 * @discard_ctl: discard control
 *
 * If we are disabling async discard, we may have intercepted block groups that
 * are completely free and ready for the unused_bgs path. As discarding will
 * now happen in transaction commit or not at all, we can safely mark the
 * corresponding block groups as unused and they will be sent on their merry
 * way to the unused_bgs list.
 */
static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group, *next;
	int i;

	spin_lock(&discard_ctl->lock);
	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		list_for_each_entry_safe(block_group, next,
					 &discard_ctl->discard_list[i],
					 discard_list) {
			list_del_init(&block_group->discard_list);
			spin_unlock(&discard_ctl->lock);
			if (block_group->used == 0)
				btrfs_mark_bg_unused(block_group);
			spin_lock(&discard_ctl->lock);
			btrfs_put_block_group(block_group);
		}
	}
	spin_unlock(&discard_ctl->lock);
}

void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	btrfs_discard_punt_unused_bgs_list(fs_info);

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);

	discard_ctl->prev_discard = 0;
	discard_ctl->prev_discard_time = 0;
	atomic_set(&discard_ctl->discardable_extents, 0);
	atomic64_set(&discard_ctl->discardable_bytes, 0);
	discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
	discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC;
	discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
	discard_ctl->kbps_limit = 0;
	discard_ctl->discard_extent_bytes = 0;
	discard_ctl->discard_bitmap_bytes = 0;
	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
}

void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
	btrfs_discard_purge_list(&fs_info->discard_ctl);
}