1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2002, Linus Torvalds.
6 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 * Contains functions related to writing back dirty pages at the
 * address_space level.
11 * 10Apr2002 Andrew Morton
15 #include <linux/kernel.h>
16 #include <linux/export.h>
17 #include <linux/spinlock.h>
20 #include <linux/swap.h>
21 #include <linux/slab.h>
22 #include <linux/pagemap.h>
23 #include <linux/writeback.h>
24 #include <linux/init.h>
25 #include <linux/backing-dev.h>
26 #include <linux/task_io_accounting_ops.h>
27 #include <linux/blkdev.h>
28 #include <linux/mpage.h>
29 #include <linux/rmap.h>
30 #include <linux/percpu.h>
31 #include <linux/smp.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/syscalls.h>
35 #include <linux/pagevec.h>
36 #include <linux/timer.h>
37 #include <linux/sched/rt.h>
38 #include <linux/sched/signal.h>
39 #include <linux/mm_inline.h>
40 #include <trace/events/writeback.h>
45 * Sleep at most 200ms at a time in balance_dirty_pages().
47 #define MAX_PAUSE max(HZ/5, 1)
50 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when the interval falls below it.
53 #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
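/*
 * Worked example (editor's illustration, assuming 4KiB pages, i.e.
 * PAGE_SHIFT == 12): DIRTY_POLL_THRESH = 128 >> (12 - 10) = 32 pages,
 * so the dirty poll interval is never pushed below 32 dirtied pages.
 */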
56 * Estimate write bandwidth at 200ms intervals.
58 #define BANDWIDTH_INTERVAL max(HZ/5, 1)
60 #define RATELIMIT_CALC_SHIFT 10
63 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
64 * will look to see if it needs to force writeback or throttling.
66 static long ratelimit_pages = 32;
68 /* The following parameters are exported via /proc/sys/vm */
71 * Start background writeback (via writeback threads) at this percentage
73 int dirty_background_ratio = 10;
76 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
77 * dirty_background_ratio * the amount of dirtyable memory
79 unsigned long dirty_background_bytes;
82 * free highmem will not be subtracted from the total free memory
83 * for calculating free ratios if vm_highmem_is_dirtyable is true
85 int vm_highmem_is_dirtyable;
88 * The generator of dirty data starts writeback at this percentage
90 int vm_dirty_ratio = 20;
93 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
94 * vm_dirty_ratio * the amount of dirtyable memory
96 unsigned long vm_dirty_bytes;
99 * The interval between `kupdate'-style writebacks
101 unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
103 EXPORT_SYMBOL_GPL(dirty_writeback_interval);
106 * The longest time for which data is allowed to remain dirty
108 unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
111 * Flag that makes the machine dump writes/reads and block dirtyings.
116 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
117 * a full sync is triggered after this time elapses without any disk activity.
121 EXPORT_SYMBOL(laptop_mode);
123 /* End of sysctl-exported parameters */
125 struct wb_domain global_wb_domain;
127 /* consolidated parameters for balance_dirty_pages() and its subroutines */
128 struct dirty_throttle_control {
129 #ifdef CONFIG_CGROUP_WRITEBACK
130 struct wb_domain *dom;
131 struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */
133 struct bdi_writeback *wb;
134 struct fprop_local_percpu *wb_completions;
136 unsigned long avail; /* dirtyable */
137 unsigned long dirty; /* file_dirty + write + nfs */
138 unsigned long thresh; /* dirty threshold */
139 unsigned long bg_thresh; /* dirty background threshold */
141 unsigned long wb_dirty; /* per-wb counterparts */
142 unsigned long wb_thresh;
143 unsigned long wb_bg_thresh;
145 unsigned long pos_ratio;
149 * Length of period for aging writeout fractions of bdis. This is an
150 * arbitrarily chosen number. The longer the period, the slower fractions will
151 * reflect changes in current writeout rate.
153 #define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
155 #ifdef CONFIG_CGROUP_WRITEBACK
157 #define GDTC_INIT(__wb) .wb = (__wb), \
158 .dom = &global_wb_domain, \
159 .wb_completions = &(__wb)->completions
161 #define GDTC_INIT_NO_WB .dom = &global_wb_domain
163 #define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
164 .dom = mem_cgroup_wb_domain(__wb), \
				.wb_completions = &(__wb)->memcg_completions, \
				.gdtc = __gdtc
168 static bool mdtc_valid(struct dirty_throttle_control *dtc)
173 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
178 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
183 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
185 return &wb->memcg_completions;
188 static void wb_min_max_ratio(struct bdi_writeback *wb,
189 unsigned long *minp, unsigned long *maxp)
191 unsigned long this_bw = wb->avg_write_bandwidth;
192 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
193 unsigned long long min = wb->bdi->min_ratio;
194 unsigned long long max = wb->bdi->max_ratio;
197 * @wb may already be clean by the time control reaches here and
198 * the total may not include its bw.
200 if (this_bw < tot_bw) {
203 min = div64_ul(min, tot_bw);
207 max = div64_ul(max, tot_bw);
215 #else /* CONFIG_CGROUP_WRITEBACK */
217 #define GDTC_INIT(__wb) .wb = (__wb), \
218 .wb_completions = &(__wb)->completions
219 #define GDTC_INIT_NO_WB
220 #define MDTC_INIT(__wb, __gdtc)
222 static bool mdtc_valid(struct dirty_throttle_control *dtc)
227 static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
229 return &global_wb_domain;
232 static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
237 static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
242 static void wb_min_max_ratio(struct bdi_writeback *wb,
243 unsigned long *minp, unsigned long *maxp)
245 *minp = wb->bdi->min_ratio;
246 *maxp = wb->bdi->max_ratio;
249 #endif /* CONFIG_CGROUP_WRITEBACK */
252 * In a memory zone, there is a certain amount of pages we consider
253 * available for the page cache, which is essentially the number of
254 * free and reclaimable pages, minus some zone reserves to protect
255 * lowmem and the ability to uphold the zone's watermarks without
256 * requiring writeback.
258 * This number of dirtyable pages is the base value of which the
259 * user-configurable dirty ratio is the effective number of pages that
260 * are allowed to be actually dirtied. Per individual zone, or
261 * globally by using the sum of dirtyable pages over all zones.
263 * Because the user is allowed to specify the dirty limit globally as
264 * absolute number of bytes, calculating the per-zone dirty limit can
265 * require translating the configured limit into a percentage of
266 * global dirtyable memory first.
270 * node_dirtyable_memory - number of dirtyable pages in a node
273 * Return: the node's number of pages potentially available for dirty
274 * page cache. This is the base value for the per-node dirty limits.
276 static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
278 unsigned long nr_pages = 0;
281 for (z = 0; z < MAX_NR_ZONES; z++) {
282 struct zone *zone = pgdat->node_zones + z;
284 if (!populated_zone(zone))
287 nr_pages += zone_page_state(zone, NR_FREE_PAGES);
291 * Pages reserved for the kernel should not be considered
292 * dirtyable, to prevent a situation where reclaim has to
293 * clean pages in order to balance the zones.
295 nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
297 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
298 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
303 static unsigned long highmem_dirtyable_memory(unsigned long total)
305 #ifdef CONFIG_HIGHMEM
310 for_each_node_state(node, N_HIGH_MEMORY) {
311 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
313 unsigned long nr_pages;
315 if (!is_highmem_idx(i))
318 z = &NODE_DATA(node)->node_zones[i];
319 if (!populated_zone(z))
322 nr_pages = zone_page_state(z, NR_FREE_PAGES);
323 /* watch for underflows */
324 nr_pages -= min(nr_pages, high_wmark_pages(z));
325 nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
326 nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
332 * Unreclaimable memory (kernel memory or anonymous memory
333 * without swap) can bring down the dirtyable pages below
334 * the zone's dirty balance reserve and the above calculation
335 * will underflow. However we still want to add in nodes
336 * which are below threshold (negative values) to get a more
 * accurate calculation but make sure that the total never underflows.
344 * Make sure that the number of highmem pages is never larger
345 * than the number of the total dirtyable memory. This can only
346 * occur in very strange VM situations but we want to make sure
347 * that this does not occur.
349 return min(x, total);
356 * global_dirtyable_memory - number of globally dirtyable pages
358 * Return: the global number of pages potentially available for dirty
359 * page cache. This is the base value for the global dirty limits.
361 static unsigned long global_dirtyable_memory(void)
365 x = global_zone_page_state(NR_FREE_PAGES);
367 * Pages reserved for the kernel should not be considered
368 * dirtyable, to prevent a situation where reclaim has to
369 * clean pages in order to balance the zones.
371 x -= min(x, totalreserve_pages);
373 x += global_node_page_state(NR_INACTIVE_FILE);
374 x += global_node_page_state(NR_ACTIVE_FILE);
376 if (!vm_highmem_is_dirtyable)
377 x -= highmem_dirtyable_memory(x);
379 return x + 1; /* Ensure that we never return 0 */
383 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
384 * @dtc: dirty_throttle_control of interest
386 * Calculate @dtc->thresh and ->bg_thresh considering
387 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
388 * must ensure that @dtc->avail is set before calling this function. The
389 * dirty limits will be lifted by 1/4 for real-time tasks.
391 static void domain_dirty_limits(struct dirty_throttle_control *dtc)
393 const unsigned long available_memory = dtc->avail;
394 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
395 unsigned long bytes = vm_dirty_bytes;
396 unsigned long bg_bytes = dirty_background_bytes;
397 /* convert ratios to per-PAGE_SIZE for higher precision */
398 unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
399 unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
400 unsigned long thresh;
401 unsigned long bg_thresh;
402 struct task_struct *tsk;
404 /* gdtc is !NULL iff @dtc is for memcg domain */
406 unsigned long global_avail = gdtc->avail;
409 * The byte settings can't be applied directly to memcg
410 * domains. Convert them to ratios by scaling against
411 * globally available memory. As the ratios are in
 * per-PAGE_SIZE, they can be obtained by dividing bytes by the
 * number of pages.
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, global_avail),
				    PAGE_SIZE);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
				       PAGE_SIZE);
421 bytes = bg_bytes = 0;
	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / PAGE_SIZE;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
434 if (bg_thresh >= thresh)
435 bg_thresh = thresh / 2;
	tsk = current;
	if (rt_task(tsk)) {
		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
	}
441 dtc->thresh = thresh;
442 dtc->bg_thresh = bg_thresh;
444 /* we should eventually report the domain in the TP */
446 trace_global_dirty_state(bg_thresh, thresh);
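/*
 * Worked example (editor's illustration, not from the original source):
 * with 4KiB pages and dtc->avail = 1,048,576 pages (4GiB of dirtyable
 * memory), vm_dirty_ratio = 20 gives ratio = 20 * 4096 / 100 = 819, so
 * thresh = 819 * 1,048,576 / 4096 = 209,664 pages (~819MiB, slightly
 * under 20% because of the integer truncation in the per-PAGE_SIZE
 * conversion).  dirty_background_ratio = 10 gives a bg_thresh of about
 * half that, ~409MiB.
 */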
450 * global_dirty_limits - background-writeback and dirty-throttling thresholds
451 * @pbackground: out parameter for bg_thresh
452 * @pdirty: out parameter for thresh
454 * Calculate bg_thresh and thresh for global_wb_domain. See
455 * domain_dirty_limits() for details.
457 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
459 struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
461 gdtc.avail = global_dirtyable_memory();
462 domain_dirty_limits(&gdtc);
464 *pbackground = gdtc.bg_thresh;
465 *pdirty = gdtc.thresh;
469 * node_dirty_limit - maximum number of dirty pages allowed in a node
472 * Return: the maximum number of dirty pages allowed in a node, based
473 * on the node's dirtyable memory.
475 static unsigned long node_dirty_limit(struct pglist_data *pgdat)
477 unsigned long node_memory = node_dirtyable_memory(pgdat);
478 struct task_struct *tsk = current;
	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			node_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * node_memory / 100;
494 * node_dirty_ok - tells whether a node is within its dirty limits
495 * @pgdat: the node to check
497 * Return: %true when the dirty pages in @pgdat are within the node's
498 * dirty limit, %false if the limit is exceeded.
500 bool node_dirty_ok(struct pglist_data *pgdat)
502 unsigned long limit = node_dirty_limit(pgdat);
503 unsigned long nr_pages = 0;
505 nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
506 nr_pages += node_page_state(pgdat, NR_WRITEBACK);
508 return nr_pages <= limit;
511 int dirty_background_ratio_handler(struct ctl_table *table, int write,
512 void *buffer, size_t *lenp, loff_t *ppos)
516 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
517 if (ret == 0 && write)
518 dirty_background_bytes = 0;
522 int dirty_background_bytes_handler(struct ctl_table *table, int write,
523 void *buffer, size_t *lenp, loff_t *ppos)
527 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
528 if (ret == 0 && write)
529 dirty_background_ratio = 0;
533 int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
534 size_t *lenp, loff_t *ppos)
536 int old_ratio = vm_dirty_ratio;
539 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
540 if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
541 writeback_set_ratelimit();
547 int dirty_bytes_handler(struct ctl_table *table, int write,
548 void *buffer, size_t *lenp, loff_t *ppos)
550 unsigned long old_bytes = vm_dirty_bytes;
553 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
554 if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
555 writeback_set_ratelimit();
561 static unsigned long wp_next_time(unsigned long cur_time)
563 cur_time += VM_COMPLETIONS_PERIOD_LEN;
564 /* 0 has a special meaning... */
570 static void wb_domain_writeout_inc(struct wb_domain *dom,
571 struct fprop_local_percpu *completions,
572 unsigned int max_prop_frac)
574 __fprop_inc_percpu_max(&dom->completions, completions,
576 /* First event after period switching was turned off? */
577 if (unlikely(!dom->period_time)) {
579 * We can race with other __bdi_writeout_inc calls here but
580 * it does not cause any harm since the resulting time when
 * timer will fire and what is in writeout_period_time will be
 * roughly the same.
584 dom->period_time = wp_next_time(jiffies);
585 mod_timer(&dom->period_timer, dom->period_time);
590 * Increment @wb's writeout completion count and the global writeout
591 * completion count. Called from test_clear_page_writeback().
593 static inline void __wb_writeout_inc(struct bdi_writeback *wb)
595 struct wb_domain *cgdom;
597 inc_wb_stat(wb, WB_WRITTEN);
598 wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
599 wb->bdi->max_prop_frac);
601 cgdom = mem_cgroup_wb_domain(wb);
603 wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
604 wb->bdi->max_prop_frac);
607 void wb_writeout_inc(struct bdi_writeback *wb)
611 local_irq_save(flags);
612 __wb_writeout_inc(wb);
613 local_irq_restore(flags);
615 EXPORT_SYMBOL_GPL(wb_writeout_inc);
618 * On idle system, we can be called long after we scheduled because we use
619 * deferred timers so count with missed periods.
621 static void writeout_period(struct timer_list *t)
623 struct wb_domain *dom = from_timer(dom, t, period_timer);
624 int miss_periods = (jiffies - dom->period_time) /
625 VM_COMPLETIONS_PERIOD_LEN;
627 if (fprop_new_period(&dom->completions, miss_periods + 1)) {
628 dom->period_time = wp_next_time(dom->period_time +
629 miss_periods * VM_COMPLETIONS_PERIOD_LEN);
630 mod_timer(&dom->period_timer, dom->period_time);
 * Aging has zeroed all fractions. Stop wasting CPU on period updates.
636 dom->period_time = 0;
640 int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
642 memset(dom, 0, sizeof(*dom));
644 spin_lock_init(&dom->lock);
646 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
648 dom->dirty_limit_tstamp = jiffies;
650 return fprop_global_init(&dom->completions, gfp);
653 #ifdef CONFIG_CGROUP_WRITEBACK
654 void wb_domain_exit(struct wb_domain *dom)
656 del_timer_sync(&dom->period_timer);
657 fprop_global_destroy(&dom->completions);
662 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, can not
 * exceed 100%.
666 static unsigned int bdi_min_ratio;
668 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
672 spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
677 if (bdi_min_ratio + min_ratio < 100) {
678 bdi_min_ratio += min_ratio;
679 bdi->min_ratio += min_ratio;
684 spin_unlock_bh(&bdi_lock);
689 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
696 spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
701 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
703 spin_unlock_bh(&bdi_lock);
707 EXPORT_SYMBOL(bdi_set_max_ratio);
709 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
710 unsigned long bg_thresh)
712 return (thresh + bg_thresh) / 2;
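/*
 * Editor's note (illustrative): with the default 10%/20% limits the
 * freerun ceiling sits at 15% of dirtyable memory; below that point
 * balance_dirty_pages() does not throttle at all.
 */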
715 static unsigned long hard_dirty_limit(struct wb_domain *dom,
716 unsigned long thresh)
718 return max(thresh, dom->dirty_limit);
722 * Memory which can be further allocated to a memcg domain is capped by
723 * system-wide clean memory excluding the amount being used in the domain.
725 static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
726 unsigned long filepages, unsigned long headroom)
728 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
729 unsigned long clean = filepages - min(filepages, mdtc->dirty);
730 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
731 unsigned long other_clean = global_clean - min(global_clean, clean);
733 mdtc->avail = filepages + min(headroom, other_clean);
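/*
 * Worked example (editor's illustration): if the memcg has
 * filepages = 100,000, headroom = 50,000 and mdtc->dirty = 20,000 while
 * the system as a whole has 300,000 clean dirtyable pages, then
 * clean = 80,000, other_clean = 220,000 and
 * mdtc->avail = 100,000 + min(50,000, 220,000) = 150,000 pages.
 */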
737 * __wb_calc_thresh - @wb's share of dirty throttling threshold
738 * @dtc: dirty_throttle_context of interest
740 * Note that balance_dirty_pages() will only seriously take it as a hard limit
741 * when sleeping max_pause per page is not enough to keep the dirty pages under
742 * control. For example, when the device is completely stalled due to some error
743 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
744 * In the other normal situations, it acts more gently by throttling the tasks
745 * more (rather than completely block them) when the wb dirty pages go high.
747 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
748 * - starving fast devices
749 * - piling up dirty pages (that will take long time to sync) on slow devices
751 * The wb's share of dirty limit will be adapting to its throughput and
752 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
754 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
755 * dirty balancing includes all PG_dirty and PG_writeback pages.
757 static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
759 struct wb_domain *dom = dtc_dom(dtc);
760 unsigned long thresh = dtc->thresh;
762 unsigned long numerator, denominator;
763 unsigned long wb_min_ratio, wb_max_ratio;
766 * Calculate this BDI's share of the thresh ratio.
768 fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
769 &numerator, &denominator);
771 wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
772 wb_thresh *= numerator;
773 wb_thresh = div64_ul(wb_thresh, denominator);
775 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
777 wb_thresh += (thresh * wb_min_ratio) / 100;
778 if (wb_thresh > (thresh * wb_max_ratio) / 100)
779 wb_thresh = thresh * wb_max_ratio / 100;
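/*
 * Editor's illustration (hypothetical numbers): if this wb recently
 * completed ~30% of all writeout (numerator/denominator = 3/10) and no
 * min/max ratios are configured, its share works out to roughly 30% of
 * the global thresh, so a fast device is allowed to hold more dirty
 * pages than an idle or slow one.
 */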
784 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
786 struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
788 return __wb_calc_thresh(&gdtc);
 *    f(dirty) := 1.0 + ((setpoint - dirty) / (limit - setpoint))^3
796 * it's a 3rd order polynomial that subjects to
798 * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
799 * (2) f(setpoint) = 1.0 => the balance point
800 * (3) f(limit) = 0 => the hard limit
801 * (4) df/dx <= 0 => negative feedback control
802 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
803 * => fast response on large errors; small oscillation near setpoint
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
812 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
815 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
816 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
817 pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
819 return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
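/*
 * Worked example (editor's illustration): with setpoint = 600,000,
 * limit = 800,000 and dirty = 700,000 pages,
 * x = ((600,000 - 700,000) << 10) / 200,000 = -512, i.e. -0.5 in the
 * RATELIMIT_CALC_SHIFT fixed point, so pos_ratio = 1 + (-0.5)^3 = 0.875
 * (896/1024).  At dirty == limit, x == -1.0 and pos_ratio == 0; at the
 * freerun ceiling x == +1.0 and pos_ratio reaches the 2.0 cap.
 */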
823 * Dirty position control.
825 * (o) global/bdi setpoints
827 * We want the dirty pages be balanced around the global/wb setpoints.
828 * When the number of dirty pages is higher/lower than the setpoint, the
829 * dirty position control ratio (and hence task dirty ratelimit) will be
830 * decreased/increased to bring the dirty pages back to the setpoint.
832 * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
834 * if (dirty < setpoint) scale up pos_ratio
835 * if (dirty > setpoint) scale down pos_ratio
837 * if (wb_dirty < wb_setpoint) scale up pos_ratio
838 * if (wb_dirty > wb_setpoint) scale down pos_ratio
840 * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
842 * (o) global control line
 * [ASCII plot elided: pos_ratio vs. dirty pages across the global dirty
 *  control scope -- f ramps up to 2.0 near freerun, passes 1.0 at the
 *  setpoint and drops to 0 at the limit.]
863 * (o) wb control line
 * [ASCII plot elided: the wb control line -- pos_ratio equals 1.0 at
 *  wb_setpoint, falls off linearly over "span" towards x_intercept and
 *  is never allowed to drop below 1/4.]
891 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
892 * be smoothly throttled down to normal if it starts high in situations like
893 * - start writing to a slow SD card and a fast disk at the same time. The SD
894 * card's wb_dirty may rush to many times higher than wb_setpoint.
895 * - the wb dirty thresh drops quickly due to change of JBOD workload
897 static void wb_position_ratio(struct dirty_throttle_control *dtc)
899 struct bdi_writeback *wb = dtc->wb;
900 unsigned long write_bw = wb->avg_write_bandwidth;
901 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
902 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
903 unsigned long wb_thresh = dtc->wb_thresh;
904 unsigned long x_intercept;
905 unsigned long setpoint; /* dirty pages' target balance point */
906 unsigned long wb_setpoint;
908 long long pos_ratio; /* for scaling up/down the rate limit */
	if (unlikely(dtc->dirty >= limit))
		return;
919 * See comment for pos_ratio_polynom().
921 setpoint = (freerun + limit) / 2;
922 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
925 * The strictlimit feature is a tool preventing mistrusted filesystems
926 * from growing a large number of dirty pages before throttling. For
927 * such filesystems balance_dirty_pages always checks wb counters
928 * against wb limits. Even if global "nr_dirty" is under "freerun".
929 * This is especially important for fuse which sets bdi->max_ratio to
930 * 1% by default. Without strictlimit feature, fuse writeback may
931 * consume arbitrary amount of RAM because it is accounted in
932 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
934 * Here, in wb_position_ratio(), we calculate pos_ratio based on
935 * two values: wb_dirty and wb_thresh. Let's consider an example:
936 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
937 * limits are set by default to 10% and 20% (background and throttle).
938 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
939 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
940 * about ~6K pages (as the average of background and throttle wb
941 * limits). The 3rd order polynomial will provide positive feedback if
942 * wb_dirty is under wb_setpoint and vice versa.
944 * Note, that we cannot use global counters in these calculations
945 * because we want to throttle process writing to a strictlimit wb
946 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
947 * in the example above).
949 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
950 long long wb_pos_ratio;
952 if (dtc->wb_dirty < 8) {
953 dtc->pos_ratio = min_t(long long, pos_ratio * 2,
954 2 << RATELIMIT_CALC_SHIFT);
		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 dtc->wb_thresh);
971 * Typically, for strictlimit case, wb_setpoint << setpoint
972 * and pos_ratio >> wb_pos_ratio. In the other words global
973 * state ("dirty") is not limiting factor and we have to
974 * make decision based on wb counters. But there is an
975 * important case when global pos_ratio should get precedence:
976 * global limits are exceeded (e.g. due to activities on other
977 * wb's) while given strictlimit wb is below limit.
979 * "pos_ratio * wb_pos_ratio" would work for the case above,
980 * but it would look too non-natural for the case of all
981 * activity in the system coming from a single strictlimit wb
982 * with bdi->max_ratio == 100%.
984 * Note that min() below somewhat changes the dynamics of the
985 * control system. Normally, pos_ratio value can be well over 3
986 * (when globally we are at freerun and wb is well below wb
987 * setpoint). Now the maximum pos_ratio in the same situation
988 * is 2. We might want to tweak this if we observe the control
989 * system is too slow to adapt.
991 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
996 * We have computed basic pos_ratio above based on global situation. If
997 * the wb is over/under its share of dirty pages, we want to scale
998 * pos_ratio further down/up. That is done by the following mechanism.
 *   f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
 *               = (x_intercept - wb_dirty) / (x_intercept - wb_setpoint)
1010 * The main wb control line is a linear function that subjects to
1012 * (1) f(wb_setpoint) = 1.0
1013 * (2) k = - 1 / (8 * write_bw) (in single wb case)
1014 * or equally: x_intercept = wb_setpoint + 8 * write_bw
1016 * For single wb case, the dirty pages are observed to fluctuate
1017 * regularly within range
1018 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
1019 * for various filesystems, where (2) can yield in a reasonable 12.5%
1020 * fluctuation range for pos_ratio.
1022 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
1023 * own size, so move the slope over accordingly and choose a slope that
1024 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
1026 if (unlikely(wb_thresh > dtc->thresh))
1027 wb_thresh = dtc->thresh;
1029 * It's very possible that wb_thresh is close to 0 not because the
1030 * device is slow, but that it has remained inactive for long time.
1031 * Honour such devices a reasonable good (hopefully IO efficient)
1032 * threshold, so that the occasional writes won't be blocked and active
1033 * writes can rampup the threshold quickly.
1035 wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
1037 * scale global setpoint to wb's:
1038 * wb_setpoint = setpoint * wb_thresh / thresh
1040 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
1041 wb_setpoint = setpoint * (u64)x >> 16;
1043 * Use span=(8*write_bw) in single wb case as indicated by
1044 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
 *   span = (wb_thresh / thresh) * (8 * write_bw) +
 *          ((thresh - wb_thresh) / thresh) * wb_thresh
1050 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
1051 x_intercept = wb_setpoint + span;
1053 if (dtc->wb_dirty < x_intercept - span / 4) {
1054 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
1055 (x_intercept - wb_setpoint) | 1);
1060 * wb reserve area, safeguard against dirty pool underrun and disk idle
1061 * It may push the desired control point of global dirty pages higher
1064 x_intercept = wb_thresh / 2;
1065 if (dtc->wb_dirty < x_intercept) {
1066 if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}
1073 dtc->pos_ratio = pos_ratio;
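/*
 * Worked example for the wb control line (editor's illustration): with
 * avg_write_bandwidth of 25,600 pages/s (~100MiB/s at 4KiB pages) and a
 * single wb (wb_thresh ~= thresh), span ~= 8 * write_bw = 204,800 pages
 * and x_intercept = wb_setpoint + 204,800.  A wb_dirty excess of
 * write_bw/2 = 12,800 pages above wb_setpoint then scales pos_ratio by
 * (204,800 - 12,800) / 204,800 ~= 0.94 -- the ~12.5% fluctuation band
 * mentioned in the comment above.
 */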
1076 static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1077 unsigned long elapsed,
1078 unsigned long written)
1080 const unsigned long period = roundup_pow_of_two(3 * HZ);
1081 unsigned long avg = wb->avg_write_bandwidth;
1082 unsigned long old = wb->write_bandwidth;
1086 * bw = written * HZ / elapsed
 * write_bandwidth = (bw * elapsed + write_bandwidth * (period - elapsed))
 *                   / period
1092 * @written may have decreased due to account_page_redirty().
1093 * Avoid underflowing @bw calculation.
1095 bw = written - min(written, wb->written_stamp);
1097 if (unlikely(elapsed > period)) {
1098 bw = div64_ul(bw, elapsed);
1102 bw += (u64)wb->write_bandwidth * (period - elapsed);
1103 bw >>= ilog2(period);
1106 * one more level of smoothing, for filtering out sudden spikes
1108 if (avg > old && old >= (unsigned long)bw)
1109 avg -= (avg - old) >> 3;
1111 if (avg < old && old <= (unsigned long)bw)
1112 avg += (old - avg) >> 3;
1115 /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1116 avg = max(avg, 1LU);
1117 if (wb_has_dirty_io(wb)) {
1118 long delta = avg - wb->avg_write_bandwidth;
1119 WARN_ON_ONCE(atomic_long_add_return(delta,
1120 &wb->bdi->tot_write_bandwidth) <= 0);
1122 wb->write_bandwidth = bw;
1123 wb->avg_write_bandwidth = avg;
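/*
 * Worked example (editor's illustration, HZ == 1000): period =
 * roundup_pow_of_two(3000) = 4096.  If 5,000 pages were written in an
 * elapsed interval of 200 jiffies, the instantaneous rate is
 * bw = 5,000 * 1000 / 200 = 25,000 pages/s, and the stored estimate
 * only moves part of the way there:
 * write_bandwidth = (25,000 * 200 + old * (4096 - 200)) >> 12.
 */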
1126 static void update_dirty_limit(struct dirty_throttle_control *dtc)
1128 struct wb_domain *dom = dtc_dom(dtc);
1129 unsigned long thresh = dtc->thresh;
1130 unsigned long limit = dom->dirty_limit;
1133 * Follow up in one step.
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}
1141 * Follow down slowly. Use the higher one as the target, because thresh
1142 * may drop below dirty. This is exactly the reason to introduce
1143 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
1145 thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
1155 static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
1158 struct wb_domain *dom = dtc_dom(dtc);
1161 * check locklessly first to optimize away locking for the most time
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;
1166 spin_lock(&dom->lock);
1167 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
1168 update_dirty_limit(dtc);
1169 dom->dirty_limit_tstamp = now;
1171 spin_unlock(&dom->lock);
1175 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
1177 * Normal wb tasks will be curbed at or below it in long term.
1178 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1180 static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
1181 unsigned long dirtied,
1182 unsigned long elapsed)
1184 struct bdi_writeback *wb = dtc->wb;
1185 unsigned long dirty = dtc->dirty;
1186 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
1187 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
1188 unsigned long setpoint = (freerun + limit) / 2;
1189 unsigned long write_bw = wb->avg_write_bandwidth;
1190 unsigned long dirty_ratelimit = wb->dirty_ratelimit;
1191 unsigned long dirty_rate;
1192 unsigned long task_ratelimit;
1193 unsigned long balanced_dirty_ratelimit;
1196 unsigned long shift;
1199 * The dirty rate will match the writeout rate in long term, except
1200 * when dirty pages are truncated by userspace or re-dirtied by FS.
1202 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
1205 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1207 task_ratelimit = (u64)dirty_ratelimit *
1208 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
1209 task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1212 * A linear estimation of the "balanced" throttle rate. The theory is,
1213 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
1214 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1215 * formula will yield the balanced rate limit (write_bw / N).
1217 * Note that the expanded form is not a pure rate feedback:
1218 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
1219 * but also takes pos_ratio into account:
1220 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
1222 * (1) is not realistic because pos_ratio also takes part in balancing
1223 * the dirty rate. Consider the state
1224 * pos_ratio = 0.5 (3)
1225 * rate = 2 * (write_bw / N) (4)
1226 * If (1) is used, it will stuck in that state! Because each dd will
1228 * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
1230 * dirty_rate = N * task_ratelimit = write_bw (6)
1231 * put (6) into (1) we get
1232 * rate_(i+1) = rate_(i) (7)
1234 * So we end up using (2) to always keep
1235 * rate_(i+1) ~= (write_bw / N) (8)
1236 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1237 * pos_ratio is able to drive itself to 1.0, which is not only where
1238 * the dirty count meet the setpoint, but also where the slope of
1239 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
1244 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1246 if (unlikely(balanced_dirty_ratelimit > write_bw))
1247 balanced_dirty_ratelimit = write_bw;
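	/*
	 * Worked example (editor's illustration): suppose write_bw is 25,000
	 * pages/s and two dd tasks are each being throttled at
	 * task_ratelimit = 20,000 pages/s, so the measured dirty_rate is
	 * ~40,000 pages/s.  Then balanced_dirty_ratelimit =
	 * 20,000 * 25,000 / 40,000 = 12,500 pages/s, i.e. write_bw / N for
	 * N = 2 dirtiers, exactly the target rate described above.
	 */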
1250 * We could safely do this and return immediately:
1252 * wb->dirty_ratelimit = balanced_dirty_ratelimit;
1254 * However to get a more stable dirty_ratelimit, the below elaborated
1255 * code makes use of task_ratelimit to filter out singular points and
1256 * limit the step size.
1258 * The below code essentially only uses the relative value of
1260 * task_ratelimit - dirty_ratelimit
1261 * = (pos_ratio - 1) * dirty_ratelimit
1263 * which reflects the direction and size of dirty position error.
1267 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1268 * task_ratelimit is on the same side of dirty_ratelimit, too.
1270 * - dirty_ratelimit > balanced_dirty_ratelimit
1271 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1272 * lowering dirty_ratelimit will help meet both the position and rate
1273 * control targets. Otherwise, don't update dirty_ratelimit if it will
1274 * only help meet the rate target. After all, what the users ultimately
1275 * feel and care are stable dirty rate and small position error.
1277 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
1278 * and filter out the singular points of balanced_dirty_ratelimit. Which
1279 * keeps jumping around randomly and can even leap far away at times
1280 * due to the small 200ms estimation period of dirty_rate (we want to
1281 * keep that period small to reduce time lags).
1286 * For strictlimit case, calculations above were based on wb counters
1287 * and limits (starting from pos_ratio = wb_position_ratio() and up to
1288 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
1289 * Hence, to calculate "step" properly, we have to use wb_dirty as
1290 * "dirty" and wb_setpoint as "setpoint".
1292 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
1293 * it's possible that wb_thresh is close to zero due to inactivity
1294 * of backing device.
1296 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
1297 dirty = dtc->wb_dirty;
1298 if (dtc->wb_dirty < 8)
1299 setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
1304 if (dirty < setpoint) {
1305 x = min3(wb->balanced_dirty_ratelimit,
1306 balanced_dirty_ratelimit, task_ratelimit);
1307 if (dirty_ratelimit < x)
1308 step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
1311 balanced_dirty_ratelimit, task_ratelimit);
1312 if (dirty_ratelimit > x)
1313 step = dirty_ratelimit - x;
1317 * Don't pursue 100% rate matching. It's impossible since the balanced
1318 * rate itself is constantly fluctuating. So decrease the track speed
1319 * when it gets close to the target. Helps eliminate pointless tremors.
1321 shift = dirty_ratelimit / (2 * step + 1);
1322 if (shift < BITS_PER_LONG)
1323 step = DIV_ROUND_UP(step >> shift, 8);
1327 if (dirty_ratelimit < balanced_dirty_ratelimit)
1328 dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;
1332 wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
1333 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
1335 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
1338 static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1339 struct dirty_throttle_control *mdtc,
1340 unsigned long start_time,
1341 bool update_ratelimit)
1343 struct bdi_writeback *wb = gdtc->wb;
1344 unsigned long now = jiffies;
1345 unsigned long elapsed = now - wb->bw_time_stamp;
1346 unsigned long dirtied;
1347 unsigned long written;
1349 lockdep_assert_held(&wb->list_lock);
1352 * rate-limit, only update once every 200ms.
	if (elapsed < BANDWIDTH_INTERVAL)
		return;
1357 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1358 written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
1361 * Skip quiet periods when disk bandwidth is under-utilized.
1362 * (at least 1s idle time between two flusher runs)
1364 if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
1367 if (update_ratelimit) {
1368 domain_update_bandwidth(gdtc, now);
1369 wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1372 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1373 * compiler has no way to figure that out. Help it.
1375 if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
1376 domain_update_bandwidth(mdtc, now);
1377 wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1380 wb_update_write_bandwidth(wb, elapsed, written);
1383 wb->dirtied_stamp = dirtied;
1384 wb->written_stamp = written;
1385 wb->bw_time_stamp = now;
1388 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
1390 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1392 __wb_update_bandwidth(&gdtc, NULL, start_time, false);
1396 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
1397 * will look to see if it needs to start dirty throttling.
1399 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1400 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
1401 * (the number of pages we may dirty without exceeding the dirty limits).
1403 static unsigned long dirty_poll_interval(unsigned long dirty,
1404 unsigned long thresh)
1407 return 1UL << (ilog2(thresh - dirty) >> 1);
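/*
 * Worked example (editor's illustration): with a safety margin of
 * thresh - dirty = 65,536 pages, ilog2(65,536) = 16, so the task may
 * dirty 1 << (16 >> 1) = 256 more pages before the next expensive
 * global check; a margin of only 1,024 pages shrinks that to 32 pages.
 */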
1412 static unsigned long wb_max_pause(struct bdi_writeback *wb,
1413 unsigned long wb_dirty)
1415 unsigned long bw = wb->avg_write_bandwidth;
1419 * Limit pause time for small memory systems. If sleeping for too long
	 * time, a small pool of dirty/writeback pages may go empty and disk go
	 * idle.
1423 * 8 serves as the safety ratio.
1425 t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1428 return min_t(unsigned long, t, MAX_PAUSE);
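/*
 * Worked example (editor's illustration, HZ == 1000): with
 * avg_write_bandwidth = 25,000 pages/s, roundup_pow_of_two(1 + 125) = 128,
 * so t = wb_dirty / (1 + 25,000 / 128) = wb_dirty / 196 jiffies.
 * 10,000 dirty pages give t ~= 51ms; the result is further capped at
 * MAX_PAUSE = 200ms.
 */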
1431 static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
1434 unsigned long dirty_ratelimit,
1435 int *nr_dirtied_pause)
1437 long hi = ilog2(wb->avg_write_bandwidth);
1438 long lo = ilog2(wb->dirty_ratelimit);
1439 long t; /* target pause */
1440 long pause; /* estimated next pause */
1441 int pages; /* target nr_dirtied_pause */
1443 /* target for 10ms pause on 1-dd case */
1444 t = max(1, HZ / 100);
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
1450 * (N * 10ms) on 2^N concurrent tasks.
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;
1456 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1457 * on the much more stable dirty_ratelimit. However the next pause time
1458 * will be computed based on task_ratelimit and the two rate limits may
1459 * depart considerably at some time. Especially if task_ratelimit goes
1460 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1461 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
1462 * result task_ratelimit won't be executed faithfully, which could
1463 * eventually bring down dirty_ratelimit.
1465 * We apply two rules to fix it up:
1466 * 1) try to estimate the next pause time and if necessary, use a lower
1467 * nr_dirtied_pause so as not to exceed max_pause. When this happens,
1468 * nr_dirtied_pause will be "dancing" with task_ratelimit.
1469 * 2) limit the target pause time to max_pause/2, so that the normal
1470 * small fluctuations of task_ratelimit won't trigger rule (1) and
1471 * nr_dirtied_pause will remain as stable as dirty_ratelimit.
1473 t = min(t, 1 + max_pause / 2);
1474 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1477 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1478 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1479 * When the 16 consecutive reads are often interrupted by some dirty
1480 * throttling pause during the async writes, cfq will go into idles
1481 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
1486 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1487 if (pages > DIRTY_POLL_THRESH) {
1488 pages = DIRTY_POLL_THRESH;
1489 t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1493 pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}
1499 *nr_dirtied_pause = pages;
1501 * The minimal pause time will normally be half the target pause time.
1503 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1506 static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
1508 struct bdi_writeback *wb = dtc->wb;
1509 unsigned long wb_reclaimable;
1512 * wb_thresh is not treated as some limiting factor as
1513 * dirty_thresh, due to reasons
1514 * - in JBOD setup, wb_thresh can fluctuate a lot
1515 * - in a system with HDD and USB key, the USB key may somehow
1516 * go into state (wb_dirty >> wb_thresh) either because
1517 * wb_dirty starts high, or because wb_thresh drops low.
1518 * In this case we don't want to hard throttle the USB key
1519 * dirtiers for 100 seconds until wb_dirty drops under
1520 * wb_thresh. Instead the auxiliary wb control line in
1521 * wb_position_ratio() will let the dirtier task progress
1522 * at some rate <= (write_bw / 2) for bringing down wb_dirty.
1524 dtc->wb_thresh = __wb_calc_thresh(dtc);
1525 dtc->wb_bg_thresh = dtc->thresh ?
1526 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
1529 * In order to avoid the stacked BDI deadlock we need
1530 * to ensure we accurately count the 'dirty' pages when
1531 * the threshold is low.
1533 * Otherwise it would be possible to get thresh+n pages
1534 * reported dirty, even though there are thresh-m pages
	 * actually dirty; with m+n sitting in the percpu
	 * deltas.
1538 if (dtc->wb_thresh < 2 * wb_stat_error()) {
1539 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
1540 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
	} else {
		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
		dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
	}
1548 * balance_dirty_pages() must be called by processes which are generating dirty
1549 * data. It looks at the number of dirty pages in the machine and will force
1550 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1551 * If we're over `background_thresh' then the writeback threads are woken to
1552 * perform some writeout.
1554 static void balance_dirty_pages(struct bdi_writeback *wb,
1555 unsigned long pages_dirtied)
1557 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1558 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1559 struct dirty_throttle_control * const gdtc = &gdtc_stor;
1560 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1562 struct dirty_throttle_control *sdtc;
1563 unsigned long nr_reclaimable; /* = file_dirty */
1568 int nr_dirtied_pause;
1569 bool dirty_exceeded = false;
1570 unsigned long task_ratelimit;
1571 unsigned long dirty_ratelimit;
1572 struct backing_dev_info *bdi = wb->bdi;
1573 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
1574 unsigned long start_time = jiffies;
1577 unsigned long now = jiffies;
1578 unsigned long dirty, thresh, bg_thresh;
1579 unsigned long m_dirty = 0; /* stop bogus uninit warnings */
1580 unsigned long m_thresh = 0;
1581 unsigned long m_bg_thresh = 0;
1583 nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
1584 gdtc->avail = global_dirtyable_memory();
1585 gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
1587 domain_dirty_limits(gdtc);
1589 if (unlikely(strictlimit)) {
1590 wb_dirty_limits(gdtc);
1592 dirty = gdtc->wb_dirty;
1593 thresh = gdtc->wb_thresh;
1594 bg_thresh = gdtc->wb_bg_thresh;
		} else {
			dirty = gdtc->dirty;
			thresh = gdtc->thresh;
			bg_thresh = gdtc->bg_thresh;
		}
1602 unsigned long filepages, headroom, writeback;
1605 * If @wb belongs to !root memcg, repeat the same
1606 * basic calculations for the memcg domain.
1608 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1609 &mdtc->dirty, &writeback);
1610 mdtc->dirty += writeback;
1611 mdtc_calc_avail(mdtc, filepages, headroom);
1613 domain_dirty_limits(mdtc);
1615 if (unlikely(strictlimit)) {
1616 wb_dirty_limits(mdtc);
1617 m_dirty = mdtc->wb_dirty;
1618 m_thresh = mdtc->wb_thresh;
1619 m_bg_thresh = mdtc->wb_bg_thresh;
			} else {
				m_dirty = mdtc->dirty;
				m_thresh = mdtc->thresh;
				m_bg_thresh = mdtc->bg_thresh;
			}
1628 * Throttle it only when the background writeback cannot
1629 * catch-up. This avoids (excessively) small writeouts
1630 * when the wb limits are ramping up in case of !strictlimit.
1632 * In strictlimit case make decision based on the wb counters
1633 * and limits. Small writeouts when the wb limits are ramping
1634 * up are the price we consciously pay for strictlimit-ing.
1636 * If memcg domain is in effect, @dirty should be under
1637 * both global and memcg freerun ceilings.
		if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
		    (!mdtc ||
		     m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
			unsigned long intv;
			unsigned long m_intv;

			intv = dirty_poll_interval(dirty, thresh);
			m_intv = ULONG_MAX;
			current->dirty_paused_when = now;
			current->nr_dirtied = 0;
			if (mdtc)
				m_intv = dirty_poll_interval(m_dirty, m_thresh);
			current->nr_dirtied_pause = min(intv, m_intv);
			break;
		}
1657 if (unlikely(!writeback_in_progress(wb)))
1658 wb_start_background_writeback(wb);
1660 mem_cgroup_flush_foreign(wb);
1663 * Calculate global domain's pos_ratio and select the
1664 * global dtc by default.
1667 wb_dirty_limits(gdtc);
		if ((current->flags & PF_LOCAL_THROTTLE) &&
		    gdtc->wb_dirty <
		    dirty_freerun_ceiling(gdtc->wb_thresh,
					  gdtc->wb_bg_thresh))
1674 * LOCAL_THROTTLE tasks must not be throttled
1675 * when below the per-wb freerun ceiling.
1680 dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1681 ((gdtc->dirty > gdtc->thresh) || strictlimit);
		wb_position_ratio(gdtc);
		sdtc = gdtc;
1688 * If memcg domain is in effect, calculate its
1689 * pos_ratio. @wb should satisfy constraints from
1690 * both global and memcg domains. Choose the one
1691 * w/ lower pos_ratio.
1694 wb_dirty_limits(mdtc);
			if ((current->flags & PF_LOCAL_THROTTLE) &&
			    mdtc->wb_dirty <
			    dirty_freerun_ceiling(mdtc->wb_thresh,
						  mdtc->wb_bg_thresh))
1701 * LOCAL_THROTTLE tasks must not be
1702 * throttled when below the per-wb
1707 dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1708 ((mdtc->dirty > mdtc->thresh) || strictlimit);
1710 wb_position_ratio(mdtc);
			if (mdtc->pos_ratio < gdtc->pos_ratio)
				sdtc = mdtc;
1715 if (dirty_exceeded && !wb->dirty_exceeded)
1716 wb->dirty_exceeded = 1;
1718 if (time_is_before_jiffies(wb->bw_time_stamp +
1719 BANDWIDTH_INTERVAL)) {
1720 spin_lock(&wb->list_lock);
1721 __wb_update_bandwidth(gdtc, mdtc, start_time, true);
1722 spin_unlock(&wb->list_lock);
1725 /* throttle according to the chosen dtc */
1726 dirty_ratelimit = wb->dirty_ratelimit;
1727 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
1728 RATELIMIT_CALC_SHIFT;
1729 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
1730 min_pause = wb_min_pause(wb, max_pause,
1731 task_ratelimit, dirty_ratelimit,
		if (unlikely(task_ratelimit == 0)) {
			period = max_pause;
			pause = max_pause;
			goto pause;
		}
		period = HZ * pages_dirtied / task_ratelimit;
		pause = period;
1741 if (current->dirty_paused_when)
1742 pause -= now - current->dirty_paused_when;
1744 * For less than 1s think time (ext3/4 may block the dirtier
1745 * for up to 800ms from time to time on 1-HDD; so does xfs,
1746 * however at much less frequency), try to compensate it in
1747 * future periods by updating the virtual time; otherwise just
1748 * do a reset, as it may be a light dirtier.
1750 if (pause < min_pause) {
1751 trace_balance_dirty_pages(wb,
1764 current->dirty_paused_when = now;
1765 current->nr_dirtied = 0;
1766 } else if (period) {
1767 current->dirty_paused_when += period;
1768 current->nr_dirtied = 0;
1769 } else if (current->nr_dirtied_pause <= pages_dirtied)
1770 current->nr_dirtied_pause += pages_dirtied;
1773 if (unlikely(pause > max_pause)) {
1774 /* for occasional dropped task_ratelimit */
1775 now += min(pause - max_pause, max_pause);
1780 trace_balance_dirty_pages(wb,
1792 __set_current_state(TASK_KILLABLE);
1793 wb->dirty_sleep = now;
1794 io_schedule_timeout(pause);
1796 current->dirty_paused_when = now + pause;
1797 current->nr_dirtied = 0;
1798 current->nr_dirtied_pause = nr_dirtied_pause;
1801 * This is typically equal to (dirty < thresh) and can also
1802 * keep "1000+ dd on a slow USB stick" under control.
1808 * In the case of an unresponsive NFS server and the NFS dirty
1809 * pages exceeds dirty_thresh, give the other good wb's a pipe
1810 * to go through, so that tasks on them still remain responsive.
1812 * In theory 1 page is enough to keep the consumer-producer
1813 * pipe going: the flusher cleans 1 page => the task dirties 1
1814 * more page. However wb_dirty has accounting errors. So use
1815 * the larger and more IO friendly wb_stat_error.
		if (sdtc->wb_dirty <= wb_stat_error())
			break;

		if (fatal_signal_pending(current))
			break;
1824 if (!dirty_exceeded && wb->dirty_exceeded)
1825 wb->dirty_exceeded = 0;
	if (writeback_in_progress(wb))
		return;
1831 * In laptop mode, we wait until hitting the higher threshold before
1832 * starting background writeout, and then write out all the way down
1833 * to the lower threshold. So slow writers cause minimal disk activity.
1835 * In normal mode, we start background writeout at the lower
1836 * background_thresh, to keep the amount of dirty memory low.
1841 if (nr_reclaimable > gdtc->bg_thresh)
1842 wb_start_background_writeback(wb);
1845 static DEFINE_PER_CPU(int, bdp_ratelimits);
 * Normal tasks are throttled by
 *	loop {
 *		dirty tsk->nr_dirtied_pause pages;
 *		take a snap in balance_dirty_pages();
 *	}
 * However there is a worst case. If every task exits immediately after dirtying
 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1855 * called to throttle the page dirties. The solution is to save the not yet
1856 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1857 * randomly into the running tasks. This works well for the above worst case,
1858 * as the new task will pick up and accumulate the old task's leaked dirty
1859 * count and eventually get throttled.
1861 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1864 * balance_dirty_pages_ratelimited - balance dirty memory state
1865 * @mapping: address_space which was dirtied
1867 * Processes which are dirtying memory should call in here once for each page
1868 * which was newly dirtied. The function will periodically check the system's
1869 * dirty state and will initiate writeback if needed.
1871 * Once we're over the dirty memory limit we decrease the ratelimiting
1872 * by a lot, to prevent individual processes from overshooting the limit
1873 * by (ratelimit_pages) each.
1875 void balance_dirty_pages_ratelimited(struct address_space *mapping)
1877 struct inode *inode = mapping->host;
1878 struct backing_dev_info *bdi = inode_to_bdi(inode);
1879 struct bdi_writeback *wb = NULL;
	if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
		return;

	if (inode_cgwb_enabled(inode))
		wb = wb_get_create_current(bdi, GFP_KERNEL);
	if (!wb)
		wb = &bdi->wb;
1891 ratelimit = current->nr_dirtied_pause;
1892 if (wb->dirty_exceeded)
1893 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
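	/*
	 * Editor's note (illustrative): on 4KiB-page systems this clamps the
	 * per-task ratelimit to 32 >> 2 = 8 pages once the wb has exceeded
	 * its dirty limits, so an offending task re-enters
	 * balance_dirty_pages() almost immediately.
	 */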
	 * This prevents one CPU from accumulating too many dirtied pages without
1898 * calling into balance_dirty_pages(), which can happen when there are
1899 * 1000+ tasks, all of them start dirtying pages at exactly the same
1900 * time, hence all honoured too large initial task->nr_dirtied_pause.
1902 p = this_cpu_ptr(&bdp_ratelimits);
	if (unlikely(current->nr_dirtied >= ratelimit))
		*p = 0;
	else if (unlikely(*p >= ratelimit_pages)) {
		*p = 0;
		ratelimit = 0;
	}
1910 * Pick up the dirtied pages by the exited tasks. This avoids lots of
1911 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
1912 * the dirty throttling and livelock other long-run dirtiers.
1914 p = this_cpu_ptr(&dirty_throttle_leaks);
1915 if (*p > 0 && current->nr_dirtied < ratelimit) {
1916 unsigned long nr_pages_dirtied;
1917 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1918 *p -= nr_pages_dirtied;
1919 current->nr_dirtied += nr_pages_dirtied;
1923 if (unlikely(current->nr_dirtied >= ratelimit))
1924 balance_dirty_pages(wb, current->nr_dirtied);
1928 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
1931 * wb_over_bg_thresh - does @wb need to be written back?
1932 * @wb: bdi_writeback of interest
 * Determines whether background writeback should keep writing @wb or it's
 * clean enough.
1937 * Return: %true if writeback should continue.
1939 bool wb_over_bg_thresh(struct bdi_writeback *wb)
1941 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
1942 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
1943 struct dirty_throttle_control * const gdtc = &gdtc_stor;
1944 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1946 unsigned long reclaimable;
1947 unsigned long thresh;
1950 * Similar to balance_dirty_pages() but ignores pages being written
1951 * as we're trying to decide whether to put more under writeback.
1953 gdtc->avail = global_dirtyable_memory();
1954 gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
1955 domain_dirty_limits(gdtc);
	if (gdtc->dirty > gdtc->bg_thresh)
		return true;
1960 thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
1961 if (thresh < 2 * wb_stat_error())
1962 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
	else
		reclaimable = wb_stat(wb, WB_RECLAIMABLE);

	if (reclaimable > thresh)
		return true;
1970 unsigned long filepages, headroom, writeback;
1972 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
1974 mdtc_calc_avail(mdtc, filepages, headroom);
1975 domain_dirty_limits(mdtc); /* ditto, ignore writeback */
		if (mdtc->dirty > mdtc->bg_thresh)
			return true;
1980 thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
1981 if (thresh < 2 * wb_stat_error())
1982 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
		else
			reclaimable = wb_stat(wb, WB_RECLAIMABLE);

		if (reclaimable > thresh)
			return true;
1994 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1996 int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
1997 void *buffer, size_t *length, loff_t *ppos)
1999 unsigned int old_interval = dirty_writeback_interval;
2002 ret = proc_dointvec(table, write, buffer, length, ppos);
2005 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2006 * and a different non-zero value will wakeup the writeback threads.
2007 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2008 * iterate over all bdis and wbs.
2009 * The reason we do this is to make the change take effect immediately.
2011 if (!ret && write && dirty_writeback_interval &&
2012 dirty_writeback_interval != old_interval)
2013 wakeup_flusher_threads(WB_REASON_PERIODIC);
2019 void laptop_mode_timer_fn(struct timer_list *t)
2021 struct backing_dev_info *backing_dev_info =
2022 from_timer(backing_dev_info, t, laptop_mode_wb_timer);
2024 wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
2028 * We've spun up the disk and we're in laptop mode: schedule writeback
2029 * of all dirty data a few seconds from now. If the flush is already scheduled
2030 * then push it back - the user is still using the disk.
2032 void laptop_io_completion(struct backing_dev_info *info)
2034 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
2038 * We're in laptop mode and we've just synced. The sync's writes will have
2039 * caused another writeback to be scheduled by laptop_io_completion.
2040 * Nothing needs to be written back anymore, so we unschedule the writeback.
2042 void laptop_sync_completion(void)
2044 struct backing_dev_info *bdi;
2048 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2049 del_timer(&bdi->laptop_mode_wb_timer);
2056 * If ratelimit_pages is too high then we can get into dirty-data overload
2057 * if a large number of processes all perform writes at the same time.
2059 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2060 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory limit.
2064 void writeback_set_ratelimit(void)
2066 struct wb_domain *dom = &global_wb_domain;
2067 unsigned long background_thresh;
2068 unsigned long dirty_thresh;
2070 global_dirty_limits(&background_thresh, &dirty_thresh);
2071 dom->dirty_limit = dirty_thresh;
2072 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
2073 if (ratelimit_pages < 16)
2074 ratelimit_pages = 16;
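/*
 * Worked example with illustrative numbers (not from this file): with 4KiB
 * pages, a global dirty threshold of 2GiB is dirty_thresh = 524288 pages.
 * On an 8-CPU machine that gives ratelimit_pages = 524288 / (8 * 32) = 2048,
 * i.e. each CPU may dirty roughly 8MiB between balance_dirty_pages() calls,
 * so all CPUs together stay within 1/32 (~3%) of the dirty limit.
 */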
2077 static int page_writeback_cpu_online(unsigned int cpu)
2079 writeback_set_ratelimit();
2084 * Called early on to tune the page writeback dirty limits.
2086 * We used to scale dirty pages according to how total memory
2087 * related to pages that could be allocated for buffers.
2089 * However, that was when we used "dirty_ratio" to scale with
2090 * all memory, and we don't do that any more. "dirty_ratio"
2091 * is now applied to total non-HIGHMEM memory, and as such we can't
2092 * get into the old insane situation any more where we had
2093 * large amounts of dirty pages compared to a small amount of
2094 * non-HIGHMEM memory.
2096 * But we might still want to scale the dirty_ratio by how
2097 * much memory the box has.
2099 void __init page_writeback_init(void)
2101 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2103 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2104 page_writeback_cpu_online, NULL);
2105 cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2106 page_writeback_cpu_online);
2110 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2111 * @mapping: address space structure to write
2112 * @start: starting page index
2113 * @end: ending page index (inclusive)
2115 * This function scans the page range from @start to @end (inclusive) and tags
2116 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2117 * that write_cache_pages (or whoever calls this function) will then use
2118 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
2119 * used to avoid livelocking of writeback by a process steadily creating new
2120 * dirty pages in the file (thus it is important for this function to be quick
2121 * so that it can tag pages faster than a dirtying process can create them).
2123 void tag_pages_for_writeback(struct address_space *mapping,
2124 pgoff_t start, pgoff_t end)
2126 XA_STATE(xas, &mapping->i_pages, start);
2127 unsigned int tagged = 0;
2131 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2132 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2133 if (++tagged % XA_CHECK_SCHED)
2137 xas_unlock_irq(&xas);
2141 xas_unlock_irq(&xas);
2143 EXPORT_SYMBOL(tag_pages_for_writeback);
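/*
 * Illustrative sketch (hypothetical filesystem, not part of this file): a
 * ->writepages implementation that walks pages itself would tag the range
 * first for integrity writeback and then look the pages up by the TOWRITE
 * tag, mirroring what write_cache_pages() does below.
 */
static void examplefs_tag_range(struct address_space *mapping,
				struct writeback_control *wbc)
{
	pgoff_t index = wbc->range_start >> PAGE_SHIFT;
	pgoff_t end = wbc->range_end >> PAGE_SHIFT;

	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	/* ... then iterate with pagevec_lookup_range_tag(..., PAGECACHE_TAG_TOWRITE) ... */
}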
2146 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2147 * @mapping: address space structure to write
2148 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2149 * @writepage: function called for each page
2150 * @data: data passed to writepage function
2152 * If a page is already under I/O, write_cache_pages() skips it, even
2153 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2154 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2155 * and msync() need to guarantee that all the data which was dirty at the time
2156 * the call was made gets new I/O started against it. If wbc->sync_mode is
2157 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2158 * existing IO to complete.
2160 * To avoid livelocks (when another process dirties new pages), we first tag
2161 * pages which should be written back with TOWRITE tag and only then start
2162 * writing them. For data-integrity sync we have to be careful so that we do
2163 * not miss some pages (e.g., because some other process has cleared TOWRITE
2164 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2165 * by the process clearing the DIRTY tag (and submitting the page for IO).
2167 * To avoid deadlocks between range_cyclic writeback and callers that hold
2168 * pages in PageWriteback to aggregate IO until write_cache_pages() returns,
2169 * we do not loop back to the start of the file. Doing so causes a page
2170 * lock/page writeback access order inversion - we should only ever lock
2171 * multiple pages in ascending page->index order, and looping back to the start
2172 * of the file violates that rule and causes deadlocks.
2174 * Return: %0 on success, negative error code otherwise
2176 int write_cache_pages(struct address_space *mapping,
2177 struct writeback_control *wbc, writepage_t writepage,
2183 struct pagevec pvec;
2186 pgoff_t end; /* Inclusive */
2188 int range_whole = 0;
2191 pagevec_init(&pvec);
2192 if (wbc->range_cyclic) {
2193 index = mapping->writeback_index; /* prev offset */
2196 index = wbc->range_start >> PAGE_SHIFT;
2197 end = wbc->range_end >> PAGE_SHIFT;
2198 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2201 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
2202 tag_pages_for_writeback(mapping, index, end);
2203 tag = PAGECACHE_TAG_TOWRITE;
2205 tag = PAGECACHE_TAG_DIRTY;
2208 while (!done && (index <= end)) {
2211 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2216 for (i = 0; i < nr_pages; i++) {
2217 struct page *page = pvec.pages[i];
2219 done_index = page->index;
2224 * Page truncated or invalidated. We can freely skip it
2225 * then, even for data integrity operations: the page
2226 * has disappeared concurrently, so there could be no
2227 * real expectation of this data integrity operation
2228 * even if there is now a new, dirty page at the same
2229 * pagecache address.
2231 if (unlikely(page->mapping != mapping)) {
2237 if (!PageDirty(page)) {
2238 /* someone wrote it for us */
2239 goto continue_unlock;
2242 if (PageWriteback(page)) {
2243 if (wbc->sync_mode != WB_SYNC_NONE)
2244 wait_on_page_writeback(page);
2246 goto continue_unlock;
2249 BUG_ON(PageWriteback(page));
2250 if (!clear_page_dirty_for_io(page))
2251 goto continue_unlock;
2253 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
2254 error = (*writepage)(page, wbc, data);
2255 if (unlikely(error)) {
2257 * Handle errors according to the type of
2258 * writeback. There's no need to continue for
2259 * background writeback. Just push done_index
2260 * past this page so media errors won't choke
2261 * writeout for the entire file. For integrity
2262 * writeback, we must process the entire dirty
2263 * set regardless of errors because the fs may
2264 * still have state to clear for each page. In
2265 * that case we continue processing and return the first error.
2268 if (error == AOP_WRITEPAGE_ACTIVATE) {
2271 } else if (wbc->sync_mode != WB_SYNC_ALL) {
2273 done_index = page->index + 1;
2282 * We stop writing back only if we are not doing
2283 * integrity sync. In case of integrity sync we have to
2284 * keep going until we have written all the pages
2285 * we tagged for writeback prior to entering this loop.
2287 if (--wbc->nr_to_write <= 0 &&
2288 wbc->sync_mode == WB_SYNC_NONE) {
2293 pagevec_release(&pvec);
2298 * If we hit the last page and there is more work to be done: wrap
2299 * the index back to the start of the file for the next
2300 * time we are called.
2302 if (wbc->range_cyclic && !done)
2304 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2305 mapping->writeback_index = done_index;
2309 EXPORT_SYMBOL(write_cache_pages);
2312 * Function used by generic_writepages to call the real writepage
2313 * function and set the mapping flags on error
2315 static int __writepage(struct page *page, struct writeback_control *wbc,
2318 struct address_space *mapping = data;
2319 int ret = mapping->a_ops->writepage(page, wbc);
2320 mapping_set_error(mapping, ret);
2325 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2326 * @mapping: address space structure to write
2327 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2329 * This is a library function, which implements the writepages()
2330 * address_space_operation.
2332 * Return: %0 on success, negative error code otherwise
2334 int generic_writepages(struct address_space *mapping,
2335 struct writeback_control *wbc)
2337 struct blk_plug plug;
2340 /* deal with chardevs and other special files */
2341 if (!mapping->a_ops->writepage)
2344 blk_start_plug(&plug);
2345 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2346 blk_finish_plug(&plug);
2350 EXPORT_SYMBOL(generic_writepages);
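/*
 * Illustrative sketch (hypothetical "simplefs", not part of this file): a
 * filesystem that only provides ->writepage can point ->writepages at
 * generic_writepages(), or leave it NULL and let do_writepages() below fall
 * back to it. simplefs_readpage()/simplefs_writepage() are assumed helpers.
 */
static const struct address_space_operations simplefs_aops = {
	.readpage	= simplefs_readpage,	/* hypothetical */
	.writepage	= simplefs_writepage,	/* hypothetical */
	.writepages	= generic_writepages,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};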
2352 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2356 if (wbc->nr_to_write <= 0)
2359 if (mapping->a_ops->writepages)
2360 ret = mapping->a_ops->writepages(mapping, wbc);
2362 ret = generic_writepages(mapping, wbc);
2363 if ((ret != -ENOMEM) || (wbc->sync_mode != WB_SYNC_ALL))
2366 congestion_wait(BLK_RW_ASYNC, HZ/50);
2372 * write_one_page - write out a single page and wait on I/O
2373 * @page: the page to write
2375 * The page must be locked by the caller and will be unlocked upon return.
2377 * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this function returns.
2380 * Return: %0 on success, negative error code otherwise
2382 int write_one_page(struct page *page)
2384 struct address_space *mapping = page->mapping;
2386 struct writeback_control wbc = {
2387 .sync_mode = WB_SYNC_ALL,
2391 BUG_ON(!PageLocked(page));
2393 wait_on_page_writeback(page);
2395 if (clear_page_dirty_for_io(page)) {
2397 ret = mapping->a_ops->writepage(page, &wbc);
2399 wait_on_page_writeback(page);
2406 ret = filemap_check_errors(mapping);
2409 EXPORT_SYMBOL(write_one_page);
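/*
 * Illustrative sketch, not part of this file: a directory-update helper in a
 * simple filesystem might use write_one_page() to push a single metadata page
 * out synchronously; the page comes back unlocked and mapping errors are
 * reported. examplefs_update_entry() is an assumed helper.
 */
static int examplefs_commit_dir_page(struct page *page)
{
	lock_page(page);
	examplefs_update_entry(page);	/* hypothetical: modify the uptodate page */
	set_page_dirty(page);

	/* Starts WB_SYNC_ALL writeback, waits for it and unlocks the page. */
	return write_one_page(page);
}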
2412 * For address_spaces which neither use buffers nor write back.
2414 int __set_page_dirty_no_writeback(struct page *page)
2416 if (!PageDirty(page))
2417 return !TestSetPageDirty(page);
2420 EXPORT_SYMBOL(__set_page_dirty_no_writeback);
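/*
 * Illustrative sketch (hypothetical RAM-backed filesystem, in the style of
 * ramfs): mappings whose pages are never written back can use this helper
 * directly as ->set_page_dirty, so pages are marked dirty without any
 * wb/inode dirty accounting. The other entries are assumed helpers.
 */
static const struct address_space_operations ramexample_aops = {
	.readpage	= ramexample_readpage,		/* hypothetical */
	.write_begin	= ramexample_write_begin,	/* hypothetical */
	.write_end	= ramexample_write_end,		/* hypothetical */
	.set_page_dirty	= __set_page_dirty_no_writeback,
};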
2423 * Helper function for set_page_dirty family.
2425 * Caller must hold lock_page_memcg().
2427 * NOTE: This relies on being atomic wrt interrupts.
2429 static void account_page_dirtied(struct page *page,
2430 struct address_space *mapping)
2432 struct inode *inode = mapping->host;
2434 trace_writeback_dirty_page(page, mapping);
2436 if (mapping_can_writeback(mapping)) {
2437 struct bdi_writeback *wb;
2439 inode_attach_wb(inode, page);
2440 wb = inode_to_wb(inode);
2442 __inc_lruvec_page_state(page, NR_FILE_DIRTY);
2443 __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2444 __inc_node_page_state(page, NR_DIRTIED);
2445 inc_wb_stat(wb, WB_RECLAIMABLE);
2446 inc_wb_stat(wb, WB_DIRTIED);
2447 task_io_account_write(PAGE_SIZE);
2448 current->nr_dirtied++;
2449 __this_cpu_inc(bdp_ratelimits);
2451 mem_cgroup_track_foreign_dirty(page, wb);
2456 * Helper function for deaccounting a dirty page without writeback.
2458 * Caller must hold lock_page_memcg().
2460 void account_page_cleaned(struct page *page, struct address_space *mapping,
2461 struct bdi_writeback *wb)
2463 if (mapping_can_writeback(mapping)) {
2464 dec_lruvec_page_state(page, NR_FILE_DIRTY);
2465 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2466 dec_wb_stat(wb, WB_RECLAIMABLE);
2467 task_io_account_cancelled_write(PAGE_SIZE);
2472 * Mark the page dirty, and set it dirty in the page cache, and mark the inode dirty.
2475 * If warn is true, then emit a warning if the page is not uptodate and has
2476 * not been truncated.
2478 * The caller must hold lock_page_memcg().
2480 void __set_page_dirty(struct page *page, struct address_space *mapping,
2483 unsigned long flags;
2485 xa_lock_irqsave(&mapping->i_pages, flags);
2486 if (page->mapping) { /* Race with truncate? */
2487 WARN_ON_ONCE(warn && !PageUptodate(page));
2488 account_page_dirtied(page, mapping);
2489 __xa_set_mark(&mapping->i_pages, page_index(page),
2490 PAGECACHE_TAG_DIRTY);
2492 xa_unlock_irqrestore(&mapping->i_pages, flags);
2496 * For address_spaces which do not use buffers. Just tag the page as dirty in the xarray.
2499 * This is also used when a single buffer is being dirtied: we want to set the
2500 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2501 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2503 * The caller must ensure this doesn't race with truncation. Most will simply
2504 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2505 * the pte lock held, which also locks out truncation.
2507 int __set_page_dirty_nobuffers(struct page *page)
2509 lock_page_memcg(page);
2510 if (!TestSetPageDirty(page)) {
2511 struct address_space *mapping = page_mapping(page);
2514 unlock_page_memcg(page);
2517 __set_page_dirty(page, mapping, !PagePrivate(page));
2518 unlock_page_memcg(page);
2520 if (mapping->host) {
2521 /* !PageAnon && !swapper_space */
2522 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
2526 unlock_page_memcg(page);
2529 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2532 * Call this whenever redirtying a page, to de-account the dirty counters
2533 * (NR_DIRTIED, WB_DIRTIED, tsk->nr_dirtied), so that they match the written
2534 * counters (NR_WRITTEN, WB_WRITTEN) in the long term. The mismatches will lead
2535 * to systematic errors in balanced_dirty_ratelimit and the dirty pages position control.
2538 void account_page_redirty(struct page *page)
2540 struct address_space *mapping = page->mapping;
2542 if (mapping && mapping_can_writeback(mapping)) {
2543 struct inode *inode = mapping->host;
2544 struct bdi_writeback *wb;
2545 struct wb_lock_cookie cookie = {};
2547 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2548 current->nr_dirtied--;
2549 dec_node_page_state(page, NR_DIRTIED);
2550 dec_wb_stat(wb, WB_DIRTIED);
2551 unlocked_inode_to_wb_end(inode, &cookie);
2554 EXPORT_SYMBOL(account_page_redirty);
2557 * When a writepage implementation decides that it doesn't want to write this
2558 * page for some reason, it should redirty the locked page via
2559 * redirty_page_for_writepage() and it should then unlock the page and return 0
2561 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2565 wbc->pages_skipped++;
2566 ret = __set_page_dirty_nobuffers(page);
2567 account_page_redirty(page);
2570 EXPORT_SYMBOL(redirty_page_for_writepage);
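/*
 * Illustrative sketch, not part of this file: a ->writepage that cannot make
 * progress (here an assumed examplefs_can_write_now() predicate) follows the
 * pattern described above: redirty, unlock and return 0.
 */
static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
	if (!examplefs_can_write_now(page)) {	/* hypothetical predicate */
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}
	/* ... normal writeout path ... */
	return 0;
}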
2575 * For pages with a mapping this should be done under the page lock for the
2576 * benefit of asynchronous memory errors, which prefer a consistent dirty state.
2577 * This rule can be broken in some special cases, but it is better not to.
2579 int set_page_dirty(struct page *page)
2581 struct address_space *mapping = page_mapping(page);
2583 page = compound_head(page);
2584 if (likely(mapping)) {
2586 * readahead/lru_deactivate_page could leave
2587 * PG_readahead/PG_reclaim set due to a race with end_page_writeback().
2588 * For readahead, if the page is written, the flags will be
2589 * reset, so there is no problem.
2590 * For lru_deactivate_page, if the page is redirtied, the flag
2591 * will be reset, so there is no problem either. But if the page is later
2592 * used by readahead, it will confuse readahead and make it restart the
2593 * size ramp-up process. That is only a minor problem, though.
2595 if (PageReclaim(page))
2596 ClearPageReclaim(page);
2597 return mapping->a_ops->set_page_dirty(page);
2599 if (!PageDirty(page)) {
2600 if (!TestSetPageDirty(page))
2605 EXPORT_SYMBOL(set_page_dirty);
2608 * set_page_dirty() is racy if the caller has no reference against
2609 * page->mapping->host, and if the page is unlocked. This is because another
2610 * CPU could truncate the page off the mapping and then free the mapping.
2612 * Usually, the page _is_ locked, or the caller is a user-space process which
2613 * holds a reference on the inode by having an open file.
2615 * In other cases, the page should be locked before running set_page_dirty().
2617 int set_page_dirty_lock(struct page *page)
2622 ret = set_page_dirty(page);
2626 EXPORT_SYMBOL(set_page_dirty_lock);
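/*
 * Illustrative sketch, not part of this file: a driver that let a device DMA
 * into pages pinned from user space holds no page lock and no open-file
 * reference on the inode, so set_page_dirty_lock() is the safe way to dirty
 * the pages before dropping the pin.
 */
static void example_release_user_pages(struct page **pages,
				       unsigned long npages, bool dirtied)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		if (dirtied)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
}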
2629 * This cancels just the dirty bit on the kernel page itself, it does NOT
2630 * actually remove dirty bits on any mmap's that may be around. It also
2631 * leaves the page tagged dirty, so any sync activity will still find it on
2632 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2633 * look at the dirty bits in the VM.
2635 * Doing this should *normally* only ever be done when a page is truncated,
2636 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2637 * this when it notices that somebody has cleaned out all the buffers on a
2638 * page without actually doing it through the VM. Can you say "ext3 is
2639 * horribly ugly"? Thought you could.
2641 void __cancel_dirty_page(struct page *page)
2643 struct address_space *mapping = page_mapping(page);
2645 if (mapping_can_writeback(mapping)) {
2646 struct inode *inode = mapping->host;
2647 struct bdi_writeback *wb;
2648 struct wb_lock_cookie cookie = {};
2650 lock_page_memcg(page);
2651 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2653 if (TestClearPageDirty(page))
2654 account_page_cleaned(page, mapping, wb);
2656 unlocked_inode_to_wb_end(inode, &cookie);
2657 unlock_page_memcg(page);
2659 ClearPageDirty(page);
2662 EXPORT_SYMBOL(__cancel_dirty_page);
2665 * Clear a page's dirty flag, while caring for dirty memory accounting.
2666 * Returns true if the page was previously dirty.
2668 * This is for preparing to put the page under writeout. We leave the page
2669 * tagged as dirty in the xarray so that a concurrent write-for-sync
2670 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
2671 * implementation will run either set_page_writeback() or set_page_dirty(),
2672 * at which stage we bring the page's dirty flag and xarray dirty tag back into sync.
2675 * This incoherency between the page's dirty flag and xarray tag is
2676 * unfortunate, but it only exists while the page is locked.
2678 int clear_page_dirty_for_io(struct page *page)
2680 struct address_space *mapping = page_mapping(page);
2683 VM_BUG_ON_PAGE(!PageLocked(page), page);
2685 if (mapping && mapping_can_writeback(mapping)) {
2686 struct inode *inode = mapping->host;
2687 struct bdi_writeback *wb;
2688 struct wb_lock_cookie cookie = {};
2691 * Yes, Virginia, this is indeed insane.
2693 * We use this sequence to make sure that
2694 * (a) we account for dirty stats properly
2695 * (b) we tell the low-level filesystem to
2696 * mark the whole page dirty if it was
2697 * dirty in a pagetable. Only to then
2698 * (c) clean the page again and return 1 to
2699 * cause the writeback.
2701 * This way we avoid all nasty races with the
2702 * dirty bit in multiple places and clearing
2703 * them concurrently from different threads.
2705 * Note! Normally the "set_page_dirty(page)"
2706 * has no effect on the actual dirty bit - since
2707 * that will already usually be set. But we
2708 * need the side effects, and it can help us
2711 * We basically use the page "master dirty bit"
2712 * as a serialization point for all the different
2713 * threads doing their things.
2715 if (page_mkclean(page))
2716 set_page_dirty(page);
2718 * We carefully synchronise fault handlers against
2719 * installing a dirty pte and marking the page dirty
2720 * at this point. We do this by having them hold the
2721 * page lock while dirtying the page, and pages are
2722 * always locked coming in here, so we get the desired exclusion.
2725 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2726 if (TestClearPageDirty(page)) {
2727 dec_lruvec_page_state(page, NR_FILE_DIRTY);
2728 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2729 dec_wb_stat(wb, WB_RECLAIMABLE);
2732 unlocked_inode_to_wb_end(inode, &cookie);
2735 return TestClearPageDirty(page);
2737 EXPORT_SYMBOL(clear_page_dirty_for_io);
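/*
 * Illustrative sketch, not part of this file: the canonical ordering when a
 * filesystem submits a locked, dirty page for writeout itself - clear the
 * dirty flag for I/O, mark the page as under writeback, then unlock and
 * submit; the I/O completion handler calls end_page_writeback() (not shown).
 */
static void examplefs_start_writeout(struct page *page)
{
	if (!clear_page_dirty_for_io(page))
		return;		/* someone else already cleaned it */

	set_page_writeback(page);
	unlock_page(page);
	/* ... build and submit the bio for the page here ... */
}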
2739 int test_clear_page_writeback(struct page *page)
2741 struct address_space *mapping = page_mapping(page);
2744 lock_page_memcg(page);
2745 if (mapping && mapping_use_writeback_tags(mapping)) {
2746 struct inode *inode = mapping->host;
2747 struct backing_dev_info *bdi = inode_to_bdi(inode);
2748 unsigned long flags;
2750 xa_lock_irqsave(&mapping->i_pages, flags);
2751 ret = TestClearPageWriteback(page);
2753 __xa_clear_mark(&mapping->i_pages, page_index(page),
2754 PAGECACHE_TAG_WRITEBACK);
2755 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
2756 struct bdi_writeback *wb = inode_to_wb(inode);
2758 dec_wb_stat(wb, WB_WRITEBACK);
2759 __wb_writeout_inc(wb);
2763 if (mapping->host && !mapping_tagged(mapping,
2764 PAGECACHE_TAG_WRITEBACK))
2765 sb_clear_inode_writeback(mapping->host);
2767 xa_unlock_irqrestore(&mapping->i_pages, flags);
2769 ret = TestClearPageWriteback(page);
2772 dec_lruvec_page_state(page, NR_WRITEBACK);
2773 dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2774 inc_node_page_state(page, NR_WRITTEN);
2776 unlock_page_memcg(page);
2780 int __test_set_page_writeback(struct page *page, bool keep_write)
2782 struct address_space *mapping = page_mapping(page);
2783 int ret, access_ret;
2785 lock_page_memcg(page);
2786 if (mapping && mapping_use_writeback_tags(mapping)) {
2787 XA_STATE(xas, &mapping->i_pages, page_index(page));
2788 struct inode *inode = mapping->host;
2789 struct backing_dev_info *bdi = inode_to_bdi(inode);
2790 unsigned long flags;
2792 xas_lock_irqsave(&xas, flags);
2794 ret = TestSetPageWriteback(page);
2798 on_wblist = mapping_tagged(mapping,
2799 PAGECACHE_TAG_WRITEBACK);
2801 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
2802 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT)
2803 inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
2806 * We can come through here when swapping anonymous
2807 * pages, so we don't necessarily have an inode to track
2810 if (mapping->host && !on_wblist)
2811 sb_mark_inode_writeback(mapping->host);
2813 if (!PageDirty(page))
2814 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
2816 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
2817 xas_unlock_irqrestore(&xas, flags);
2819 ret = TestSetPageWriteback(page);
2822 inc_lruvec_page_state(page, NR_WRITEBACK);
2823 inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
2825 unlock_page_memcg(page);
2826 access_ret = arch_make_page_accessible(page);
2828 * If writeback has been triggered on a page that cannot be made
2829 * accessible, it is too late to recover here.
2831 VM_BUG_ON_PAGE(access_ret != 0, page);
2836 EXPORT_SYMBOL(__test_set_page_writeback);
2839 * Wait for a page to complete writeback
2841 void wait_on_page_writeback(struct page *page)
2843 while (PageWriteback(page)) {
2844 trace_wait_on_page_writeback(page, page_mapping(page));
2845 wait_on_page_bit(page, PG_writeback);
2848 EXPORT_SYMBOL_GPL(wait_on_page_writeback);
2851 * Wait for a page to complete writeback. Returns -EINTR if we get a
2852 * fatal signal while waiting.
2854 int wait_on_page_writeback_killable(struct page *page)
2856 while (PageWriteback(page)) {
2857 trace_wait_on_page_writeback(page, page_mapping(page));
2858 if (wait_on_page_bit_killable(page, PG_writeback))
2864 EXPORT_SYMBOL_GPL(wait_on_page_writeback_killable);
2867 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2868 * @page: The page to wait on.
2870 * This function determines if the given page is related to a backing device
2871 * that requires page contents to be held stable during writeback. If so, then
2872 * it will wait for any pending writeback to complete.
2874 void wait_for_stable_page(struct page *page)
2876 page = thp_head(page);
2877 if (page->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
2878 wait_on_page_writeback(page);
2880 EXPORT_SYMBOL_GPL(wait_for_stable_page);
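/*
 * Illustrative sketch, not part of this file: a ->page_mkwrite handler must
 * not let user space modify a page that may still be under writeback on a
 * stable-pages device; filemap_page_mkwrite() follows essentially this shape.
 * The mapping check is elided for brevity.
 */
static vm_fault_t examplefs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	/* ... verify page->mapping and mark the page dirty ... */
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
}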