writeback: rename nr_reclaimable to nr_dirty in balance_dirty_pages
[linux-block.git] mm/page-writeback.c
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4 2/*
f30c2269 3 * mm/page-writeback.c
1da177e4
LT
4 *
5 * Copyright (C) 2002, Linus Torvalds.
90eec103 6 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
1da177e4
LT
7 *
8 * Contains functions related to writing back dirty pages at the
9 * address_space level.
10 *
e1f8e874 11 * 10Apr2002 Andrew Morton
1da177e4
LT
12 * Initial version
13 */
14
15#include <linux/kernel.h>
1bf27e98 16#include <linux/math64.h>
b95f1b31 17#include <linux/export.h>
1da177e4
LT
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <linux/mm.h>
21#include <linux/swap.h>
22#include <linux/slab.h>
23#include <linux/pagemap.h>
24#include <linux/writeback.h>
25#include <linux/init.h>
26#include <linux/backing-dev.h>
55e829af 27#include <linux/task_io_accounting_ops.h>
1da177e4
LT
28#include <linux/blkdev.h>
29#include <linux/mpage.h>
d08b3851 30#include <linux/rmap.h>
1da177e4 31#include <linux/percpu.h>
1da177e4
LT
32#include <linux/smp.h>
33#include <linux/sysctl.h>
34#include <linux/cpu.h>
35#include <linux/syscalls.h>
811d736f 36#include <linux/pagevec.h>
eb608e3a 37#include <linux/timer.h>
8bd75c77 38#include <linux/sched/rt.h>
f361bf4a 39#include <linux/sched/signal.h>
6e543d57 40#include <linux/mm_inline.h>
028c2dd1 41#include <trace/events/writeback.h>
1da177e4 42
6e543d57
LD
43#include "internal.h"
44
ffd1f609
WF
45/*
46 * Sleep at most 200ms at a time in balance_dirty_pages().
47 */
48#define MAX_PAUSE max(HZ/5, 1)
49
5b9b3574
WF
50/*
51 * Try to keep balance_dirty_pages() call intervals higher than this many pages
52 * by raising pause time to max_pause when the interval falls below it.
53 */
54#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
55
e98be2d5
WF
56/*
57 * Estimate write bandwidth at 200ms intervals.
58 */
59#define BANDWIDTH_INTERVAL max(HZ/5, 1)
60
6c14ae1e
WF
61#define RATELIMIT_CALC_SHIFT 10
62
1da177e4
LT
63/*
64 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
65 * will look to see if it needs to force writeback or throttling.
66 */
67static long ratelimit_pages = 32;
68
1da177e4
LT
69/* The following parameters are exported via /proc/sys/vm */
70
71/*
5b0830cb 72 * Start background writeback (via writeback threads) at this percentage
1da177e4 73 */
aa779e51 74static int dirty_background_ratio = 10;
1da177e4 75
2da02997
DR
76/*
77 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
78 * dirty_background_ratio * the amount of dirtyable memory
79 */
aa779e51 80static unsigned long dirty_background_bytes;
2da02997 81
195cf453
BG
82/*
83 * free highmem will not be subtracted from the total free memory
84 * for calculating free ratios if vm_highmem_is_dirtyable is true
85 */
aa779e51 86static int vm_highmem_is_dirtyable;
195cf453 87
1da177e4
LT
88/*
89 * The generator of dirty data starts writeback at this percentage
90 */
aa779e51 91static int vm_dirty_ratio = 20;
1da177e4 92
2da02997
DR
93/*
94 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
95 * vm_dirty_ratio * the amount of dirtyable memory
96 */
aa779e51 97static unsigned long vm_dirty_bytes;
2da02997 98
1da177e4 99/*
704503d8 100 * The interval between `kupdate'-style writebacks
1da177e4 101 */
22ef37ee 102unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
1da177e4 103
91913a29
AB
104EXPORT_SYMBOL_GPL(dirty_writeback_interval);
105
1da177e4 106/*
704503d8 107 * The longest time for which data is allowed to remain dirty
1da177e4 108 */
22ef37ee 109unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
1da177e4 110
1da177e4 111/*
ed5b43f1
BS
112 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
113 * a full sync is triggered after this time elapses without any disk activity.
1da177e4
LT
114 */
115int laptop_mode;
116
117EXPORT_SYMBOL(laptop_mode);
118
119/* End of sysctl-exported parameters */
120
dcc25ae7 121struct wb_domain global_wb_domain;
1da177e4 122
2bc00aef
TH
123/* consolidated parameters for balance_dirty_pages() and its subroutines */
124struct dirty_throttle_control {
e9f07dfd
TH
125#ifdef CONFIG_CGROUP_WRITEBACK
126 struct wb_domain *dom;
9fc3a43e 127 struct dirty_throttle_control *gdtc; /* only set in memcg dtc's */
e9f07dfd 128#endif
2bc00aef 129 struct bdi_writeback *wb;
e9770b34 130 struct fprop_local_percpu *wb_completions;
eb608e3a 131
9fc3a43e 132 unsigned long avail; /* dirtyable */
2bc00aef
TH
133 unsigned long dirty; /* file_dirty + write + nfs */
134 unsigned long thresh; /* dirty threshold */
135 unsigned long bg_thresh; /* dirty background threshold */
136
137 unsigned long wb_dirty; /* per-wb counterparts */
138 unsigned long wb_thresh;
970fb01a 139 unsigned long wb_bg_thresh;
daddfa3c
TH
140
141 unsigned long pos_ratio;
2bc00aef
TH
142};
143
eb608e3a
JK
144/*
145 * Length of period for aging writeout fractions of bdis. This is an
146 * arbitrarily chosen number. The longer the period, the slower fractions will
147 * reflect changes in current writeout rate.
148 */
149#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
04fbfdc1 150
693108a8
TH
151#ifdef CONFIG_CGROUP_WRITEBACK
152
d60d1bdd
TH
153#define GDTC_INIT(__wb) .wb = (__wb), \
154 .dom = &global_wb_domain, \
155 .wb_completions = &(__wb)->completions
156
9fc3a43e 157#define GDTC_INIT_NO_WB .dom = &global_wb_domain
d60d1bdd
TH
158
159#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \
160 .dom = mem_cgroup_wb_domain(__wb), \
161 .wb_completions = &(__wb)->memcg_completions, \
162 .gdtc = __gdtc
c2aa723a
TH
163
164static bool mdtc_valid(struct dirty_throttle_control *dtc)
165{
166 return dtc->dom;
167}
e9f07dfd
TH
168
169static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
170{
171 return dtc->dom;
172}
173
9fc3a43e
TH
174static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
175{
176 return mdtc->gdtc;
177}
178
841710aa
TH
179static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
180{
181 return &wb->memcg_completions;
182}
183
693108a8
TH
184static void wb_min_max_ratio(struct bdi_writeback *wb,
185 unsigned long *minp, unsigned long *maxp)
186{
20792ebf 187 unsigned long this_bw = READ_ONCE(wb->avg_write_bandwidth);
693108a8
TH
188 unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
189 unsigned long long min = wb->bdi->min_ratio;
190 unsigned long long max = wb->bdi->max_ratio;
191
192 /*
193 * @wb may already be clean by the time control reaches here and
194 * the total may not include its bw.
195 */
196 if (this_bw < tot_bw) {
197 if (min) {
198 min *= this_bw;
6d9e8c65 199 min = div64_ul(min, tot_bw);
693108a8 200 }
ae82291e 201 if (max < 100 * BDI_RATIO_SCALE) {
693108a8 202 max *= this_bw;
6d9e8c65 203 max = div64_ul(max, tot_bw);
693108a8
TH
204 }
205 }
206
207 *minp = min;
208 *maxp = max;
209}
210
211#else /* CONFIG_CGROUP_WRITEBACK */
212
d60d1bdd
TH
213#define GDTC_INIT(__wb) .wb = (__wb), \
214 .wb_completions = &(__wb)->completions
9fc3a43e 215#define GDTC_INIT_NO_WB
c2aa723a
TH
216#define MDTC_INIT(__wb, __gdtc)
217
218static bool mdtc_valid(struct dirty_throttle_control *dtc)
219{
220 return false;
221}
e9f07dfd
TH
222
223static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
224{
225 return &global_wb_domain;
226}
227
9fc3a43e
TH
228static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
229{
230 return NULL;
231}
232
841710aa
TH
233static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
234{
235 return NULL;
236}
237
693108a8
TH
238static void wb_min_max_ratio(struct bdi_writeback *wb,
239 unsigned long *minp, unsigned long *maxp)
240{
241 *minp = wb->bdi->min_ratio;
242 *maxp = wb->bdi->max_ratio;
243}
244
245#endif /* CONFIG_CGROUP_WRITEBACK */
246
a756cf59
JW
247/*
248 * In a memory zone, there is a certain amount of pages we consider
249 * available for the page cache, which is essentially the number of
250 * free and reclaimable pages, minus some zone reserves to protect
251 * lowmem and the ability to uphold the zone's watermarks without
252 * requiring writeback.
253 *
254 * This number of dirtyable pages is the base value from which the
e0857cf5 255 * user-configurable dirty ratio determines the effective number of pages
a756cf59 256 * that are allowed to be actually dirtied, per individual zone or
257 * globally by using the sum of dirtyable pages over all zones.
258 *
259 * Because the user is allowed to specify the dirty limit globally as
260 * absolute number of bytes, calculating the per-zone dirty limit can
261 * require translating the configured limit into a percentage of
262 * global dirtyable memory first.
263 */
264
a804552b 265/**
281e3726
MG
266 * node_dirtyable_memory - number of dirtyable pages in a node
267 * @pgdat: the node
a804552b 268 *
a862f68a 269 * Return: the node's number of pages potentially available for dirty
281e3726 270 * page cache. This is the base value for the per-node dirty limits.
a804552b 271 */
281e3726 272static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)
a804552b 273{
281e3726
MG
274 unsigned long nr_pages = 0;
275 int z;
276
277 for (z = 0; z < MAX_NR_ZONES; z++) {
278 struct zone *zone = pgdat->node_zones + z;
279
280 if (!populated_zone(zone))
281 continue;
282
283 nr_pages += zone_page_state(zone, NR_FREE_PAGES);
284 }
a804552b 285
a8d01437
JW
286 /*
287 * Pages reserved for the kernel should not be considered
288 * dirtyable, to prevent a situation where reclaim has to
289 * clean pages in order to balance the zones.
290 */
281e3726 291 nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
a804552b 292
281e3726
MG
293 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
294 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
a804552b
JW
295
296 return nr_pages;
297}
298
1edf2234
JW
299static unsigned long highmem_dirtyable_memory(unsigned long total)
300{
301#ifdef CONFIG_HIGHMEM
302 int node;
bb4cc2be 303 unsigned long x = 0;
09b4ab3c 304 int i;
1edf2234
JW
305
306 for_each_node_state(node, N_HIGH_MEMORY) {
281e3726
MG
307 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
308 struct zone *z;
9cb937e2 309 unsigned long nr_pages;
281e3726
MG
310
311 if (!is_highmem_idx(i))
312 continue;
313
314 z = &NODE_DATA(node)->node_zones[i];
9cb937e2
MK
315 if (!populated_zone(z))
316 continue;
1edf2234 317
9cb937e2 318 nr_pages = zone_page_state(z, NR_FREE_PAGES);
281e3726 319 /* watch for underflows */
9cb937e2 320 nr_pages -= min(nr_pages, high_wmark_pages(z));
bb4cc2be
MG
321 nr_pages += zone_page_state(z, NR_ZONE_INACTIVE_FILE);
322 nr_pages += zone_page_state(z, NR_ZONE_ACTIVE_FILE);
323 x += nr_pages;
09b4ab3c 324 }
1edf2234 325 }
281e3726 326
1edf2234
JW
327 /*
328 * Make sure that the number of highmem pages is never larger
329 * than the number of the total dirtyable memory. This can only
330 * occur in very strange VM situations but we want to make sure
331 * that this does not occur.
332 */
333 return min(x, total);
334#else
335 return 0;
336#endif
337}
338
339/**
ccafa287 340 * global_dirtyable_memory - number of globally dirtyable pages
1edf2234 341 *
a862f68a 342 * Return: the global number of pages potentially available for dirty
ccafa287 343 * page cache. This is the base value for the global dirty limits.
1edf2234 344 */
18cf8cf8 345static unsigned long global_dirtyable_memory(void)
1edf2234
JW
346{
347 unsigned long x;
348
c41f012a 349 x = global_zone_page_state(NR_FREE_PAGES);
a8d01437
JW
350 /*
351 * Pages reserved for the kernel should not be considered
352 * dirtyable, to prevent a situation where reclaim has to
353 * clean pages in order to balance the zones.
354 */
355 x -= min(x, totalreserve_pages);
1edf2234 356
599d0c95
MG
357 x += global_node_page_state(NR_INACTIVE_FILE);
358 x += global_node_page_state(NR_ACTIVE_FILE);
a804552b 359
1edf2234
JW
360 if (!vm_highmem_is_dirtyable)
361 x -= highmem_dirtyable_memory(x);
362
363 return x + 1; /* Ensure that we never return 0 */
364}
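As a rough worked example (numbers assumed, not taken from the source): on a machine with 200,000 free pages, 8,000 reserved pages, 150,000 inactive-file pages, 100,000 active-file pages and no dirtyable highmem, this returns 200,000 - 8,000 + 150,000 + 100,000 + 1 = 442,001 dirtyable pages, roughly 1.7 GiB with 4 KiB pages.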
365
9fc3a43e
TH
366/**
367 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
368 * @dtc: dirty_throttle_control of interest
ccafa287 369 *
9fc3a43e
TH
370 * Calculate @dtc->thresh and ->bg_thresh considering
371 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
372 * must ensure that @dtc->avail is set before calling this function. The
a37b0715 373 * dirty limits will be lifted by 1/4 for real-time tasks.
ccafa287 374 */
9fc3a43e 375static void domain_dirty_limits(struct dirty_throttle_control *dtc)
ccafa287 376{
9fc3a43e
TH
377 const unsigned long available_memory = dtc->avail;
378 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
379 unsigned long bytes = vm_dirty_bytes;
380 unsigned long bg_bytes = dirty_background_bytes;
62a584fe
TH
381 /* convert ratios to per-PAGE_SIZE for higher precision */
382 unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
383 unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
9fc3a43e
TH
384 unsigned long thresh;
385 unsigned long bg_thresh;
ccafa287
JW
386 struct task_struct *tsk;
387
9fc3a43e
TH
388 /* gdtc is !NULL iff @dtc is for memcg domain */
389 if (gdtc) {
390 unsigned long global_avail = gdtc->avail;
391
392 /*
393 * The byte settings can't be applied directly to memcg
394 * domains. Convert them to ratios by scaling against
62a584fe
TH
395 * globally available memory. As the ratios are in
396 * per-PAGE_SIZE, they can be obtained by dividing bytes by
397 * number of pages.
9fc3a43e
TH
398 */
399 if (bytes)
62a584fe
TH
400 ratio = min(DIV_ROUND_UP(bytes, global_avail),
401 PAGE_SIZE);
9fc3a43e 402 if (bg_bytes)
62a584fe
TH
403 bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
404 PAGE_SIZE);
9fc3a43e
TH
405 bytes = bg_bytes = 0;
406 }
407
408 if (bytes)
409 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
ccafa287 410 else
62a584fe 411 thresh = (ratio * available_memory) / PAGE_SIZE;
ccafa287 412
9fc3a43e
TH
413 if (bg_bytes)
414 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
ccafa287 415 else
62a584fe 416 bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
ccafa287 417
90daf306 418 if (bg_thresh >= thresh)
9fc3a43e 419 bg_thresh = thresh / 2;
ccafa287 420 tsk = current;
a37b0715 421 if (rt_task(tsk)) {
a53eaff8
N
422 bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
423 thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
ccafa287 424 }
9fc3a43e
TH
425 dtc->thresh = thresh;
426 dtc->bg_thresh = bg_thresh;
427
428 /* we should eventually report the domain in the TP */
429 if (!gdtc)
430 trace_global_dirty_state(bg_thresh, thresh);
431}
432
433/**
434 * global_dirty_limits - background-writeback and dirty-throttling thresholds
435 * @pbackground: out parameter for bg_thresh
436 * @pdirty: out parameter for thresh
437 *
438 * Calculate bg_thresh and thresh for global_wb_domain. See
439 * domain_dirty_limits() for details.
440 */
441void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
442{
443 struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
444
445 gdtc.avail = global_dirtyable_memory();
446 domain_dirty_limits(&gdtc);
447
448 *pbackground = gdtc.bg_thresh;
449 *pdirty = gdtc.thresh;
ccafa287
JW
450}
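A worked pass through the defaults (all figures assumed): if gdtc.avail comes back as 1,048,576 pages (4 GiB of dirtyable memory at 4 KiB pages) with vm_dirty_ratio = 20 and dirty_background_ratio = 10, then ratio = 20 * 4096 / 100 = 819 and bg_ratio = 409 in per-PAGE_SIZE units, giving *pdirty = 819 * 1048576 / 4096 = 209,664 pages (~820 MB) and *pbackground = 409 * 1048576 / 4096 = 104,704 pages (~410 MB). Since bg_thresh is already below thresh it is not halved, and a real-time task would see both limits lifted by a quarter plus a small share of the domain's dirty_limit.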
451
a756cf59 452/**
281e3726
MG
453 * node_dirty_limit - maximum number of dirty pages allowed in a node
454 * @pgdat: the node
a756cf59 455 *
a862f68a 456 * Return: the maximum number of dirty pages allowed in a node, based
281e3726 457 * on the node's dirtyable memory.
a756cf59 458 */
281e3726 459static unsigned long node_dirty_limit(struct pglist_data *pgdat)
a756cf59 460{
281e3726 461 unsigned long node_memory = node_dirtyable_memory(pgdat);
a756cf59
JW
462 struct task_struct *tsk = current;
463 unsigned long dirty;
464
465 if (vm_dirty_bytes)
466 dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
281e3726 467 node_memory / global_dirtyable_memory();
a756cf59 468 else
281e3726 469 dirty = vm_dirty_ratio * node_memory / 100;
a756cf59 470
a37b0715 471 if (rt_task(tsk))
a756cf59
JW
472 dirty += dirty / 4;
473
474 return dirty;
475}
476
477/**
281e3726
MG
478 * node_dirty_ok - tells whether a node is within its dirty limits
479 * @pgdat: the node to check
a756cf59 480 *
a862f68a 481 * Return: %true when the dirty pages in @pgdat are within the node's
a756cf59
JW
482 * dirty limit, %false if the limit is exceeded.
483 */
281e3726 484bool node_dirty_ok(struct pglist_data *pgdat)
a756cf59 485{
281e3726
MG
486 unsigned long limit = node_dirty_limit(pgdat);
487 unsigned long nr_pages = 0;
488
11fb9989 489 nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
11fb9989 490 nr_pages += node_page_state(pgdat, NR_WRITEBACK);
a756cf59 491
281e3726 492 return nr_pages <= limit;
a756cf59
JW
493}
494
aa779e51 495#ifdef CONFIG_SYSCTL
496static int dirty_background_ratio_handler(struct ctl_table *table, int write,
32927393 497 void *buffer, size_t *lenp, loff_t *ppos)
2da02997
DR
498{
499 int ret;
500
8d65af78 501 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2da02997
DR
502 if (ret == 0 && write)
503 dirty_background_bytes = 0;
504 return ret;
505}
506
aa779e51 507static int dirty_background_bytes_handler(struct ctl_table *table, int write,
32927393 508 void *buffer, size_t *lenp, loff_t *ppos)
2da02997
DR
509{
510 int ret;
511
8d65af78 512 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997
DR
513 if (ret == 0 && write)
514 dirty_background_ratio = 0;
515 return ret;
516}
517
aa779e51 518static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
32927393 519 size_t *lenp, loff_t *ppos)
04fbfdc1
PZ
520{
521 int old_ratio = vm_dirty_ratio;
2da02997
DR
522 int ret;
523
8d65af78 524 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
04fbfdc1 525 if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
eb608e3a 526 writeback_set_ratelimit();
2da02997
DR
527 vm_dirty_bytes = 0;
528 }
529 return ret;
530}
531
aa779e51 532static int dirty_bytes_handler(struct ctl_table *table, int write,
32927393 533 void *buffer, size_t *lenp, loff_t *ppos)
2da02997 534{
fc3501d4 535 unsigned long old_bytes = vm_dirty_bytes;
2da02997
DR
536 int ret;
537
8d65af78 538 ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
2da02997 539 if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
eb608e3a 540 writeback_set_ratelimit();
2da02997 541 vm_dirty_ratio = 0;
04fbfdc1
PZ
542 }
543 return ret;
544}
aa779e51 545#endif
04fbfdc1 546
eb608e3a
JK
547static unsigned long wp_next_time(unsigned long cur_time)
548{
549 cur_time += VM_COMPLETIONS_PERIOD_LEN;
550 /* 0 has a special meaning... */
551 if (!cur_time)
552 return 1;
553 return cur_time;
554}
555
cc24df4c 556static void wb_domain_writeout_add(struct wb_domain *dom,
c7981433 557 struct fprop_local_percpu *completions,
cc24df4c 558 unsigned int max_prop_frac, long nr)
04fbfdc1 559{
be5f1797 560 __fprop_add_percpu_max(&dom->completions, completions,
cc24df4c 561 max_prop_frac, nr);
eb608e3a 562 /* First event after period switching was turned off? */
517663ed 563 if (unlikely(!dom->period_time)) {
eb608e3a
JK
564 /*
565 * We can race with other __bdi_writeout_inc calls here but
566 * it does not cause any harm since the resulting time when
567 * timer will fire and what is in writeout_period_time will be
568 * roughly the same.
569 */
380c27ca
TH
570 dom->period_time = wp_next_time(jiffies);
571 mod_timer(&dom->period_timer, dom->period_time);
eb608e3a 572 }
04fbfdc1
PZ
573}
574
c7981433
TH
575/*
576 * Increment @wb's writeout completion count and the global writeout
269ccca3 577 * completion count. Called from __folio_end_writeback().
c7981433 578 */
cc24df4c 579static inline void __wb_writeout_add(struct bdi_writeback *wb, long nr)
dd5656e5 580{
841710aa 581 struct wb_domain *cgdom;
dd5656e5 582
cc24df4c
MWO
583 wb_stat_mod(wb, WB_WRITTEN, nr);
584 wb_domain_writeout_add(&global_wb_domain, &wb->completions,
585 wb->bdi->max_prop_frac, nr);
841710aa
TH
586
587 cgdom = mem_cgroup_wb_domain(wb);
588 if (cgdom)
cc24df4c
MWO
589 wb_domain_writeout_add(cgdom, wb_memcg_completions(wb),
590 wb->bdi->max_prop_frac, nr);
dd5656e5 591}
dd5656e5 592
93f78d88 593void wb_writeout_inc(struct bdi_writeback *wb)
04fbfdc1 594{
dd5656e5
MS
595 unsigned long flags;
596
597 local_irq_save(flags);
cc24df4c 598 __wb_writeout_add(wb, 1);
dd5656e5 599 local_irq_restore(flags);
04fbfdc1 600}
93f78d88 601EXPORT_SYMBOL_GPL(wb_writeout_inc);
04fbfdc1 602
eb608e3a
JK
603/*
604 * On idle system, we can be called long after we scheduled because we use
605 * deferred timers so count with missed periods.
606 */
9823e51b 607static void writeout_period(struct timer_list *t)
eb608e3a 608{
9823e51b 609 struct wb_domain *dom = from_timer(dom, t, period_timer);
380c27ca 610 int miss_periods = (jiffies - dom->period_time) /
eb608e3a
JK
611 VM_COMPLETIONS_PERIOD_LEN;
612
380c27ca
TH
613 if (fprop_new_period(&dom->completions, miss_periods + 1)) {
614 dom->period_time = wp_next_time(dom->period_time +
eb608e3a 615 miss_periods * VM_COMPLETIONS_PERIOD_LEN);
380c27ca 616 mod_timer(&dom->period_timer, dom->period_time);
eb608e3a
JK
617 } else {
618 /*
619 * Aging has zeroed all fractions. Stop wasting CPU on period
620 * updates.
621 */
380c27ca 622 dom->period_time = 0;
eb608e3a
JK
623 }
624}
625
380c27ca
TH
626int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
627{
628 memset(dom, 0, sizeof(*dom));
dcc25ae7
TH
629
630 spin_lock_init(&dom->lock);
631
9823e51b 632 timer_setup(&dom->period_timer, writeout_period, TIMER_DEFERRABLE);
dcc25ae7
TH
633
634 dom->dirty_limit_tstamp = jiffies;
635
380c27ca
TH
636 return fprop_global_init(&dom->completions, gfp);
637}
638
841710aa
TH
639#ifdef CONFIG_CGROUP_WRITEBACK
640void wb_domain_exit(struct wb_domain *dom)
641{
642 del_timer_sync(&dom->period_timer);
643 fprop_global_destroy(&dom->completions);
644}
645#endif
646
189d3c4a 647/*
d08c429b
JW
648 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
649 * registered backing devices, which, for obvious reasons, can not
650 * exceed 100%.
189d3c4a 651 */
189d3c4a
PZ
652static unsigned int bdi_min_ratio;
653
1bf27e98
SR
654static int bdi_check_pages_limit(unsigned long pages)
655{
656 unsigned long max_dirty_pages = global_dirtyable_memory();
657
658 if (pages > max_dirty_pages)
659 return -EINVAL;
660
661 return 0;
662}
663
664static unsigned long bdi_ratio_from_pages(unsigned long pages)
665{
666 unsigned long background_thresh;
667 unsigned long dirty_thresh;
668 unsigned long ratio;
669
670 global_dirty_limits(&background_thresh, &dirty_thresh);
671 ratio = div64_u64(pages * 100ULL * BDI_RATIO_SCALE, dirty_thresh);
672
673 return ratio;
674}
675
00df7d51
SR
676static u64 bdi_get_bytes(unsigned int ratio)
677{
678 unsigned long background_thresh;
679 unsigned long dirty_thresh;
680 u64 bytes;
681
682 global_dirty_limits(&background_thresh, &dirty_thresh);
683 bytes = (dirty_thresh * PAGE_SIZE * ratio) / BDI_RATIO_SCALE / 100;
684
685 return bytes;
686}
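The two helpers above convert between an absolute page budget and the internal scaled ratio. A small user-space sketch of the round trip, assuming PAGE_SIZE = 4096 and BDI_RATIO_SCALE = 10000 (both assumptions, not taken from this file):

#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define BDI_RATIO_SCALE	10000ULL	/* assumed scale; gives 0.01% steps */

/* Round trip of the two conversions above against an assumed dirty_thresh. */
int main(void)
{
	unsigned long long dirty_thresh = 209664;	/* global threshold, pages */
	unsigned long long pages = 52416;		/* requested limit, pages */

	unsigned long long ratio = pages * 100 * BDI_RATIO_SCALE / dirty_thresh;
	unsigned long long bytes = dirty_thresh * PAGE_SIZE * ratio
					/ BDI_RATIO_SCALE / 100;

	printf("scaled ratio = %llu (%.2f%%), bytes = %llu\n",
	       ratio, ratio / (double)BDI_RATIO_SCALE, bytes);
	return 0;
}

With a global dirty threshold of 209,664 pages, asking for 52,416 pages comes back as a scaled ratio of 250,000 (25%) and converts back to 214,695,936 bytes, the same ~205 MB the caller started from.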
687
8021fb32 688static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
189d3c4a 689{
21f0dd88 690 unsigned int delta;
189d3c4a 691 int ret = 0;
189d3c4a 692
2c44af4f
SR
693 if (min_ratio > 100 * BDI_RATIO_SCALE)
694 return -EINVAL;
ae82291e 695
cfc4ba53 696 spin_lock_bh(&bdi_lock);
a42dde04 697 if (min_ratio > bdi->max_ratio) {
189d3c4a 698 ret = -EINVAL;
a42dde04 699 } else {
21f0dd88
CW
700 if (min_ratio < bdi->min_ratio) {
701 delta = bdi->min_ratio - min_ratio;
702 bdi_min_ratio -= delta;
703 bdi->min_ratio = min_ratio;
a42dde04 704 } else {
21f0dd88 705 delta = min_ratio - bdi->min_ratio;
ae82291e 706 if (bdi_min_ratio + delta < 100 * BDI_RATIO_SCALE) {
21f0dd88
CW
707 bdi_min_ratio += delta;
708 bdi->min_ratio = min_ratio;
709 } else {
710 ret = -EINVAL;
711 }
a42dde04
PZ
712 }
713 }
cfc4ba53 714 spin_unlock_bh(&bdi_lock);
a42dde04
PZ
715
716 return ret;
717}
718
efc3e6ad 719static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
a42dde04 720{
a42dde04
PZ
721 int ret = 0;
722
4e230b40
SR
723 if (max_ratio > 100 * BDI_RATIO_SCALE)
724 return -EINVAL;
725
cfc4ba53 726 spin_lock_bh(&bdi_lock);
a42dde04
PZ
727 if (bdi->min_ratio > max_ratio) {
728 ret = -EINVAL;
729 } else {
730 bdi->max_ratio = max_ratio;
fa151a39
JX
731 bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
732 (100 * BDI_RATIO_SCALE);
a42dde04 733 }
cfc4ba53 734 spin_unlock_bh(&bdi_lock);
189d3c4a
PZ
735
736 return ret;
737}
efc3e6ad 738
2c44af4f
SR
739int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio)
740{
741 return __bdi_set_min_ratio(bdi, min_ratio);
742}
743
4e230b40
SR
744int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio)
745{
746 return __bdi_set_max_ratio(bdi, max_ratio);
747}
748
8021fb32
SR
749int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
750{
751 return __bdi_set_min_ratio(bdi, min_ratio * BDI_RATIO_SCALE);
752}
753
efc3e6ad
SR
754int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio)
755{
efc3e6ad
SR
756 return __bdi_set_max_ratio(bdi, max_ratio * BDI_RATIO_SCALE);
757}
a42dde04 758EXPORT_SYMBOL(bdi_set_max_ratio);
189d3c4a 759
712c00d6
SR
760u64 bdi_get_min_bytes(struct backing_dev_info *bdi)
761{
762 return bdi_get_bytes(bdi->min_ratio);
763}
764
803c9805
SR
765int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes)
766{
767 int ret;
768 unsigned long pages = min_bytes >> PAGE_SHIFT;
769 unsigned long min_ratio;
770
771 ret = bdi_check_pages_limit(pages);
772 if (ret)
773 return ret;
774
775 min_ratio = bdi_ratio_from_pages(pages);
776 return __bdi_set_min_ratio(bdi, min_ratio);
777}
778
00df7d51
SR
779u64 bdi_get_max_bytes(struct backing_dev_info *bdi)
780{
781 return bdi_get_bytes(bdi->max_ratio);
782}
783
1bf27e98
SR
784int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes)
785{
786 int ret;
787 unsigned long pages = max_bytes >> PAGE_SHIFT;
788 unsigned long max_ratio;
789
790 ret = bdi_check_pages_limit(pages);
791 if (ret)
792 return ret;
793
794 max_ratio = bdi_ratio_from_pages(pages);
795 return __bdi_set_max_ratio(bdi, max_ratio);
796}
797
8e9d5ead
SR
798int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit)
799{
800 if (strict_limit > 1)
801 return -EINVAL;
802
803 spin_lock_bh(&bdi_lock);
804 if (strict_limit)
805 bdi->capabilities |= BDI_CAP_STRICTLIMIT;
806 else
807 bdi->capabilities &= ~BDI_CAP_STRICTLIMIT;
808 spin_unlock_bh(&bdi_lock);
809
810 return 0;
811}
812
6c14ae1e
WF
813static unsigned long dirty_freerun_ceiling(unsigned long thresh,
814 unsigned long bg_thresh)
815{
816 return (thresh + bg_thresh) / 2;
817}
818
c7981433
TH
819static unsigned long hard_dirty_limit(struct wb_domain *dom,
820 unsigned long thresh)
ffd1f609 821{
dcc25ae7 822 return max(thresh, dom->dirty_limit);
ffd1f609
WF
823}
824
c5edf9cd
TH
825/*
826 * Memory which can be further allocated to a memcg domain is capped by
827 * system-wide clean memory excluding the amount being used in the domain.
828 */
829static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
830 unsigned long filepages, unsigned long headroom)
c2aa723a
TH
831{
832 struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
c5edf9cd
TH
833 unsigned long clean = filepages - min(filepages, mdtc->dirty);
834 unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);
835 unsigned long other_clean = global_clean - min(global_clean, clean);
c2aa723a 836
c5edf9cd 837 mdtc->avail = filepages + min(headroom, other_clean);
ffd1f609
WF
838}
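For example (assumed numbers): a memcg domain with filepages = 50,000 of which mdtc->dirty = 10,000, against a global domain with gdtc->avail = 400,000 and gdtc->dirty = 100,000, gives clean = 40,000, global_clean = 300,000 and other_clean = 260,000; with a headroom of 80,000 the memcg domain ends up with avail = 50,000 + min(80,000, 260,000) = 130,000 pages.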
839
6f718656 840/**
b1cbc6d4
TH
841 * __wb_calc_thresh - @wb's share of dirty throttling threshold
842 * @dtc: dirty_throttle_context of interest
1babe183 843 *
aed21ad2
WF
844 * Note that balance_dirty_pages() will only seriously take it as a hard limit
845 * when sleeping max_pause per page is not enough to keep the dirty pages under
846 * control. For example, when the device is completely stalled due to some error
847 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
848 * In the other normal situations, it acts more gently by throttling the tasks
a88a341a 849 * more (rather than completely block them) when the wb dirty pages go high.
1babe183 850 *
6f718656 851 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
1babe183
WF
852 * - starving fast devices
853 * - piling up dirty pages (that will take long time to sync) on slow devices
854 *
a88a341a 855 * The wb's share of dirty limit will be adapting to its throughput and
1babe183 856 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
a862f68a
MR
857 *
858 * Return: @wb's dirty limit in pages. The term "dirty" in the context of
8d92890b 859 * dirty balancing includes all PG_dirty and PG_writeback pages.
1babe183 860 */
b1cbc6d4 861static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
16c4042f 862{
e9f07dfd 863 struct wb_domain *dom = dtc_dom(dtc);
b1cbc6d4 864 unsigned long thresh = dtc->thresh;
0d960a38 865 u64 wb_thresh;
d3ac946e 866 unsigned long numerator, denominator;
693108a8 867 unsigned long wb_min_ratio, wb_max_ratio;
04fbfdc1 868
16c4042f 869 /*
0d960a38 870 * Calculate this BDI's share of the thresh ratio.
16c4042f 871 */
e9770b34 872 fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
380c27ca 873 &numerator, &denominator);
04fbfdc1 874
ae82291e 875 wb_thresh = (thresh * (100 * BDI_RATIO_SCALE - bdi_min_ratio)) / (100 * BDI_RATIO_SCALE);
0d960a38 876 wb_thresh *= numerator;
d3ac946e 877 wb_thresh = div64_ul(wb_thresh, denominator);
04fbfdc1 878
b1cbc6d4 879 wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);
04fbfdc1 880
ae82291e
SR
881 wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE);
882 if (wb_thresh > (thresh * wb_max_ratio) / (100 * BDI_RATIO_SCALE))
883 wb_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE);
16c4042f 884
0d960a38 885 return wb_thresh;
1da177e4
LT
886}
887
b1cbc6d4
TH
888unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
889{
890 struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
891 .thresh = thresh };
892 return __wb_calc_thresh(&gdtc);
1da177e4
LT
893}
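Concretely (assumed numbers): with thresh = 200,000 pages, a global bdi_min_ratio of 0, and the fprop code reporting numerator/denominator = 3/10 for this wb's recent share of writeout completions, the raw share is 60,000 pages; a per-bdi min_ratio of 1% would add another 2,000 pages, while a max_ratio of 20% would cap the final wb_thresh at 40,000 pages.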
894
4b5bbc39
KS
895unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
896{
897 struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
898 struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
899 unsigned long filepages = 0, headroom = 0, writeback = 0;
900
901 gdtc.avail = global_dirtyable_memory();
902 gdtc.dirty = global_node_page_state(NR_FILE_DIRTY) +
903 global_node_page_state(NR_WRITEBACK);
904
905 mem_cgroup_wb_stats(wb, &filepages, &headroom,
906 &mdtc.dirty, &writeback);
907 mdtc.dirty += writeback;
908 mdtc_calc_avail(&mdtc, filepages, headroom);
909 domain_dirty_limits(&mdtc);
910
911 return __wb_calc_thresh(&mdtc);
912}
913
5a537485
MP
914/*
915 * f(dirty) := 1.0 + ((setpoint - dirty) / (limit - setpoint))^3
918 *
919 * it's a 3rd order polynomial that is subject to
920 *
921 * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
922 * (2) f(setpoint) = 1.0 => the balance point
923 * (3) f(limit) = 0 => the hard limit
924 * (4) df/dx <= 0 => negative feedback control
925 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
926 * => fast response on large errors; small oscillation near setpoint
927 */
d5c9fde3 928static long long pos_ratio_polynom(unsigned long setpoint,
5a537485
MP
929 unsigned long dirty,
930 unsigned long limit)
931{
932 long long pos_ratio;
933 long x;
934
d5c9fde3 935 x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
464d1387 936 (limit - setpoint) | 1);
5a537485
MP
937 pos_ratio = x;
938 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
939 pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
940 pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
941
942 return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
943}
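The three anchor points (1)-(3) above can be checked with plain 64-bit arithmetic. A stand-alone user-space sketch of the same fixed-point math (the freerun/limit figures are made up; it relies on arithmetic right shift of negative values, as the kernel code does):

#include <stdio.h>

#define RATELIMIT_CALC_SHIFT	10

/* User-space model of pos_ratio_polynom() above. */
static long long pos_ratio_polynom_model(unsigned long setpoint,
					 unsigned long dirty,
					 unsigned long limit)
{
	long long x, pos_ratio;

	x = ((long long)setpoint - (long long)dirty) * (1 << RATELIMIT_CALC_SHIFT);
	x /= (long long)((limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
	if (pos_ratio < 0)
		pos_ratio = 0;
	if (pos_ratio > (2LL << RATELIMIT_CALC_SHIFT))
		pos_ratio = 2LL << RATELIMIT_CALC_SHIFT;
	return pos_ratio;
}

int main(void)
{
	unsigned long freerun = 100000, limit = 200000;
	unsigned long setpoint = (freerun + limit) / 2;

	printf("f(freerun)  = %lld\n", pos_ratio_polynom_model(setpoint, freerun, limit));
	printf("f(setpoint) = %lld\n", pos_ratio_polynom_model(setpoint, setpoint, limit));
	printf("f(limit)    = %lld\n", pos_ratio_polynom_model(setpoint, limit, limit));
	return 0;
}

Compiled with a typical toolchain this prints roughly 2045, 1024 and 3, i.e. ~2.0, 1.0 and ~0 in units of 1/1024; the small offsets come from the "| 1" divisor trick and integer truncation.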
944
6c14ae1e
WF
945/*
946 * Dirty position control.
947 *
948 * (o) global/bdi setpoints
949 *
de1fff37 950 * We want the dirty pages be balanced around the global/wb setpoints.
6c14ae1e
WF
951 * When the number of dirty pages is higher/lower than the setpoint, the
952 * dirty position control ratio (and hence task dirty ratelimit) will be
953 * decreased/increased to bring the dirty pages back to the setpoint.
954 *
955 * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
956 *
957 * if (dirty < setpoint) scale up pos_ratio
958 * if (dirty > setpoint) scale down pos_ratio
959 *
de1fff37
TH
960 * if (wb_dirty < wb_setpoint) scale up pos_ratio
961 * if (wb_dirty > wb_setpoint) scale down pos_ratio
6c14ae1e
WF
962 *
963 * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
964 *
965 * (o) global control line
966 *
967 * ^ pos_ratio
968 * |
969 * | |<===== global dirty control scope ======>|
03231554 970 * 2.0 * * * * * * *
6c14ae1e
WF
971 * | .*
972 * | . *
973 * | . *
974 * | . *
975 * | . *
976 * | . *
977 * 1.0 ................................*
978 * | . . *
979 * | . . *
980 * | . . *
981 * | . . *
982 * | . . *
983 * 0 +------------.------------------.----------------------*------------->
984 * freerun^ setpoint^ limit^ dirty pages
985 *
de1fff37 986 * (o) wb control line
6c14ae1e
WF
987 *
988 * ^ pos_ratio
989 * |
990 * | *
991 * | *
992 * | *
993 * | *
994 * | * |<=========== span ============>|
995 * 1.0 .......................*
996 * | . *
997 * | . *
998 * | . *
999 * | . *
1000 * | . *
1001 * | . *
1002 * | . *
1003 * | . *
1004 * | . *
1005 * | . *
1006 * | . *
1007 * 1/4 ...............................................* * * * * * * * * * * *
1008 * | . .
1009 * | . .
1010 * | . .
1011 * 0 +----------------------.-------------------------------.------------->
de1fff37 1012 * wb_setpoint^ x_intercept^
6c14ae1e 1013 *
de1fff37 1014 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
6c14ae1e
WF
1015 * be smoothly throttled down to normal if it starts high in situations like
1016 * - start writing to a slow SD card and a fast disk at the same time. The SD
de1fff37
TH
1017 * card's wb_dirty may rush to many times higher than wb_setpoint.
1018 * - the wb dirty thresh drops quickly due to change of JBOD workload
6c14ae1e 1019 */
daddfa3c 1020static void wb_position_ratio(struct dirty_throttle_control *dtc)
6c14ae1e 1021{
2bc00aef 1022 struct bdi_writeback *wb = dtc->wb;
20792ebf 1023 unsigned long write_bw = READ_ONCE(wb->avg_write_bandwidth);
2bc00aef 1024 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
c7981433 1025 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
2bc00aef 1026 unsigned long wb_thresh = dtc->wb_thresh;
6c14ae1e
WF
1027 unsigned long x_intercept;
1028 unsigned long setpoint; /* dirty pages' target balance point */
de1fff37 1029 unsigned long wb_setpoint;
6c14ae1e
WF
1030 unsigned long span;
1031 long long pos_ratio; /* for scaling up/down the rate limit */
1032 long x;
1033
daddfa3c
TH
1034 dtc->pos_ratio = 0;
1035
2bc00aef 1036 if (unlikely(dtc->dirty >= limit))
daddfa3c 1037 return;
6c14ae1e
WF
1038
1039 /*
1040 * global setpoint
1041 *
5a537485
MP
1042 * See comment for pos_ratio_polynom().
1043 */
1044 setpoint = (freerun + limit) / 2;
2bc00aef 1045 pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);
5a537485
MP
1046
1047 /*
1048 * The strictlimit feature is a tool preventing mistrusted filesystems
1049 * from growing a large number of dirty pages before throttling. For
de1fff37
TH
1050 * such filesystems balance_dirty_pages always checks wb counters
1051 * against wb limits. Even if global "nr_dirty" is under "freerun".
5a537485
MP
1052 * This is especially important for fuse which sets bdi->max_ratio to
1053 * 1% by default. Without strictlimit feature, fuse writeback may
1054 * consume arbitrary amount of RAM because it is accounted in
1055 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
6c14ae1e 1056 *
a88a341a 1057 * Here, in wb_position_ratio(), we calculate pos_ratio based on
de1fff37 1058 * two values: wb_dirty and wb_thresh. Let's consider an example:
5a537485
MP
1059 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
1060 * limits are set by default to 10% and 20% (background and throttle).
de1fff37 1061 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
0d960a38 1062 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
de1fff37 1063 * about ~6K pages (as the average of background and throttle wb
5a537485 1064 * limits). The 3rd order polynomial will provide positive feedback if
de1fff37 1065 * wb_dirty is under wb_setpoint and vice versa.
6c14ae1e 1066 *
5a537485 1067 * Note, that we cannot use global counters in these calculations
de1fff37 1068 * because we want to throttle process writing to a strictlimit wb
5a537485
MP
1069 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
1070 * in the example above).
6c14ae1e 1071 */
a88a341a 1072 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
de1fff37 1073 long long wb_pos_ratio;
5a537485 1074
daddfa3c
TH
1075 if (dtc->wb_dirty < 8) {
1076 dtc->pos_ratio = min_t(long long, pos_ratio * 2,
1077 2 << RATELIMIT_CALC_SHIFT);
1078 return;
1079 }
5a537485 1080
2bc00aef 1081 if (dtc->wb_dirty >= wb_thresh)
daddfa3c 1082 return;
5a537485 1083
970fb01a
TH
1084 wb_setpoint = dirty_freerun_ceiling(wb_thresh,
1085 dtc->wb_bg_thresh);
5a537485 1086
de1fff37 1087 if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
daddfa3c 1088 return;
5a537485 1089
2bc00aef 1090 wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
de1fff37 1091 wb_thresh);
5a537485
MP
1092
1093 /*
de1fff37
TH
1094 * Typically, for strictlimit case, wb_setpoint << setpoint
1095 * and pos_ratio >> wb_pos_ratio. In other words, the global
5a537485 1096 * state ("dirty") is not the limiting factor and we have to
de1fff37 1097 * make decision based on wb counters. But there is an
5a537485
MP
1098 * important case when global pos_ratio should get precedence:
1099 * global limits are exceeded (e.g. due to activities on other
de1fff37 1100 * wb's) while given strictlimit wb is below limit.
5a537485 1101 *
de1fff37 1102 * "pos_ratio * wb_pos_ratio" would work for the case above,
5a537485 1103 * but it would look too non-natural for the case of all
de1fff37 1104 * activity in the system coming from a single strictlimit wb
5a537485
MP
1105 * with bdi->max_ratio == 100%.
1106 *
1107 * Note that min() below somewhat changes the dynamics of the
1108 * control system. Normally, pos_ratio value can be well over 3
de1fff37 1109 * (when globally we are at freerun and wb is well below wb
5a537485
MP
1110 * setpoint). Now the maximum pos_ratio in the same situation
1111 * is 2. We might want to tweak this if we observe the control
1112 * system is too slow to adapt.
1113 */
daddfa3c
TH
1114 dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
1115 return;
5a537485 1116 }
6c14ae1e
WF
1117
1118 /*
1119 * We have computed basic pos_ratio above based on global situation. If
de1fff37 1120 * the wb is over/under its share of dirty pages, we want to scale
6c14ae1e
WF
1121 * pos_ratio further down/up. That is done by the following mechanism.
1122 */
1123
1124 /*
de1fff37 1125 * wb setpoint
6c14ae1e 1126 *
de1fff37 1127 * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
6c14ae1e 1128 *
de1fff37 1129 *              := (x_intercept - wb_dirty) / (x_intercept - wb_setpoint)
6c14ae1e 1132 *
de1fff37 1133 * The main wb control line is a linear function that is subject to
6c14ae1e 1134 *
de1fff37
TH
1135 * (1) f(wb_setpoint) = 1.0
1136 * (2) k = - 1 / (8 * write_bw) (in single wb case)
1137 * or equally: x_intercept = wb_setpoint + 8 * write_bw
6c14ae1e 1138 *
de1fff37 1139 * For single wb case, the dirty pages are observed to fluctuate
6c14ae1e 1140 * regularly within range
de1fff37 1141 * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
6c14ae1e
WF
1142 * for various filesystems, where (2) can yield in a reasonable 12.5%
1143 * fluctuation range for pos_ratio.
1144 *
de1fff37 1145 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
6c14ae1e 1146 * own size, so move the slope over accordingly and choose a slope that
de1fff37 1147 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
6c14ae1e 1148 */
2bc00aef
TH
1149 if (unlikely(wb_thresh > dtc->thresh))
1150 wb_thresh = dtc->thresh;
aed21ad2 1151 /*
de1fff37 1152 * It's very possible that wb_thresh is close to 0 not because the
aed21ad2
WF
1153 * device is slow, but that it has remained inactive for long time.
1154 * Honour such devices with a reasonably good (hopefully IO efficient)
1155 * threshold, so that the occasional writes won't be blocked and active
1156 * writes can ramp up the threshold quickly.
1157 */
2bc00aef 1158 wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
6c14ae1e 1159 /*
de1fff37
TH
1160 * scale global setpoint to wb's:
1161 * wb_setpoint = setpoint * wb_thresh / thresh
6c14ae1e 1162 */
e4bc13ad 1163 x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
de1fff37 1164 wb_setpoint = setpoint * (u64)x >> 16;
6c14ae1e 1165 /*
de1fff37
TH
1166 * Use span=(8*write_bw) in single wb case as indicated by
1167 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
6c14ae1e 1168 *
de1fff37
TH
1169 * wb_thresh thresh - wb_thresh
1170 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
1171 * thresh thresh
6c14ae1e 1172 */
2bc00aef 1173 span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
de1fff37 1174 x_intercept = wb_setpoint + span;
6c14ae1e 1175
2bc00aef
TH
1176 if (dtc->wb_dirty < x_intercept - span / 4) {
1177 pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
e4bc13ad 1178 (x_intercept - wb_setpoint) | 1);
6c14ae1e
WF
1179 } else
1180 pos_ratio /= 4;
1181
8927f66c 1182 /*
de1fff37 1183 * wb reserve area, safeguard against dirty pool underrun and disk idle
8927f66c
WF
1184 * It may push the desired control point of global dirty pages higher
1185 * than setpoint.
1186 */
de1fff37 1187 x_intercept = wb_thresh / 2;
2bc00aef
TH
1188 if (dtc->wb_dirty < x_intercept) {
1189 if (dtc->wb_dirty > x_intercept / 8)
1190 pos_ratio = div_u64(pos_ratio * x_intercept,
1191 dtc->wb_dirty);
50657fc4 1192 else
8927f66c
WF
1193 pos_ratio *= 8;
1194 }
1195
daddfa3c 1196 dtc->pos_ratio = pos_ratio;
6c14ae1e
WF
1197}
1198
a88a341a
TH
1199static void wb_update_write_bandwidth(struct bdi_writeback *wb,
1200 unsigned long elapsed,
1201 unsigned long written)
e98be2d5
WF
1202{
1203 const unsigned long period = roundup_pow_of_two(3 * HZ);
a88a341a
TH
1204 unsigned long avg = wb->avg_write_bandwidth;
1205 unsigned long old = wb->write_bandwidth;
e98be2d5
WF
1206 u64 bw;
1207
1208 /*
1209 * bw = written * HZ / elapsed
1210 *
1211 * bw * elapsed + write_bandwidth * (period - elapsed)
1212 * write_bandwidth = ---------------------------------------------------
1213 * period
c72efb65 1214 *
ed2da924 1215 * @written may have decreased due to folio_redirty_for_writepage().
c72efb65 1216 * Avoid underflowing @bw calculation.
e98be2d5 1217 */
a88a341a 1218 bw = written - min(written, wb->written_stamp);
e98be2d5
WF
1219 bw *= HZ;
1220 if (unlikely(elapsed > period)) {
0a5d1a7f 1221 bw = div64_ul(bw, elapsed);
e98be2d5
WF
1222 avg = bw;
1223 goto out;
1224 }
a88a341a 1225 bw += (u64)wb->write_bandwidth * (period - elapsed);
e98be2d5
WF
1226 bw >>= ilog2(period);
1227
1228 /*
1229 * one more level of smoothing, for filtering out sudden spikes
1230 */
1231 if (avg > old && old >= (unsigned long)bw)
1232 avg -= (avg - old) >> 3;
1233
1234 if (avg < old && old <= (unsigned long)bw)
1235 avg += (old - avg) >> 3;
1236
1237out:
95a46c65
TH
1238 /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
1239 avg = max(avg, 1LU);
1240 if (wb_has_dirty_io(wb)) {
1241 long delta = avg - wb->avg_write_bandwidth;
1242 WARN_ON_ONCE(atomic_long_add_return(delta,
1243 &wb->bdi->tot_write_bandwidth) <= 0);
1244 }
a88a341a 1245 wb->write_bandwidth = bw;
20792ebf 1246 WRITE_ONCE(wb->avg_write_bandwidth, avg);
e98be2d5
WF
1247}
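The blending above amounts to a weighted average over a ~3 s window (300 jiffies at HZ=100, rounded up to the 512-jiffy power of two). A user-space sketch of just the first-level estimate, with assumed numbers and without the extra avg smoothing pass:

#include <stdio.h>

#define HZ 100UL

/*
 * Model of the first-level estimate: blend the bandwidth seen over the
 * last interval into a ~3s average (period = 512, so ilog2(period) == 9).
 */
static unsigned long update_write_bandwidth(unsigned long old_bw,
					    unsigned long elapsed,
					    unsigned long written)
{
	const unsigned long period = 512;
	unsigned long long bw;

	bw = (unsigned long long)written * HZ;
	if (elapsed > period)
		return (unsigned long)(bw / elapsed);
	bw += (unsigned long long)old_bw * (period - elapsed);
	return (unsigned long)(bw >> 9);
}

int main(void)
{
	/* 2560 pages written in 20 jiffies, previous estimate 2000 pages/s */
	printf("%lu pages/s\n", update_write_bandwidth(2000, 20, 2560));
	return 0;
}

For the sample input it prints 2421: the estimate moves 20/512 of the way from 2,000 pages/s toward the instantaneous 12,800 pages/s.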
1248
2bc00aef 1249static void update_dirty_limit(struct dirty_throttle_control *dtc)
c42843f2 1250{
e9f07dfd 1251 struct wb_domain *dom = dtc_dom(dtc);
2bc00aef 1252 unsigned long thresh = dtc->thresh;
dcc25ae7 1253 unsigned long limit = dom->dirty_limit;
c42843f2
WF
1254
1255 /*
1256 * Follow up in one step.
1257 */
1258 if (limit < thresh) {
1259 limit = thresh;
1260 goto update;
1261 }
1262
1263 /*
1264 * Follow down slowly. Use the higher one as the target, because thresh
1265 * may drop below dirty. This is exactly the reason to introduce
dcc25ae7 1266 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
c42843f2 1267 */
2bc00aef 1268 thresh = max(thresh, dtc->dirty);
c42843f2
WF
1269 if (limit > thresh) {
1270 limit -= (limit - thresh) >> 5;
1271 goto update;
1272 }
1273 return;
1274update:
dcc25ae7 1275 dom->dirty_limit = limit;
c42843f2
WF
1276}
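The asymmetry above means the domain-wide limit follows thresh upward immediately but decays by only 1/32 of the remaining gap per update: with dom->dirty_limit at 300,000 pages and max(thresh, dirty) down at 200,000 (assumed numbers), one update subtracts (300,000 - 200,000) >> 5 = 3,125 pages, and closing most of the gap takes on the order of a hundred updates, i.e. a few tens of seconds at one update per BANDWIDTH_INTERVAL.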
1277
42dd235c
JK
1278static void domain_update_dirty_limit(struct dirty_throttle_control *dtc,
1279 unsigned long now)
c42843f2 1280{
e9f07dfd 1281 struct wb_domain *dom = dtc_dom(dtc);
c42843f2
WF
1282
1283 /*
1284 * check locklessly first to optimize away locking for the most time
1285 */
dcc25ae7 1286 if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
c42843f2
WF
1287 return;
1288
dcc25ae7
TH
1289 spin_lock(&dom->lock);
1290 if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
2bc00aef 1291 update_dirty_limit(dtc);
dcc25ae7 1292 dom->dirty_limit_tstamp = now;
c42843f2 1293 }
dcc25ae7 1294 spin_unlock(&dom->lock);
c42843f2
WF
1295}
1296
be3ffa27 1297/*
de1fff37 1298 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
be3ffa27 1299 *
de1fff37 1300 * Normal wb tasks will be curbed at or below it in long term.
be3ffa27
WF
1301 * Obviously it should be around (write_bw / N) when there are N dd tasks.
1302 */
2bc00aef 1303static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
a88a341a
TH
1304 unsigned long dirtied,
1305 unsigned long elapsed)
be3ffa27 1306{
2bc00aef
TH
1307 struct bdi_writeback *wb = dtc->wb;
1308 unsigned long dirty = dtc->dirty;
1309 unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
c7981433 1310 unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
7381131c 1311 unsigned long setpoint = (freerun + limit) / 2;
a88a341a
TH
1312 unsigned long write_bw = wb->avg_write_bandwidth;
1313 unsigned long dirty_ratelimit = wb->dirty_ratelimit;
be3ffa27
WF
1314 unsigned long dirty_rate;
1315 unsigned long task_ratelimit;
1316 unsigned long balanced_dirty_ratelimit;
7381131c
WF
1317 unsigned long step;
1318 unsigned long x;
d59b1087 1319 unsigned long shift;
be3ffa27
WF
1320
1321 /*
1322 * The dirty rate will match the writeout rate in long term, except
1323 * when dirty pages are truncated by userspace or re-dirtied by FS.
1324 */
a88a341a 1325 dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;
be3ffa27 1326
be3ffa27
WF
1327 /*
1328 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
1329 */
1330 task_ratelimit = (u64)dirty_ratelimit *
daddfa3c 1331 dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
be3ffa27
WF
1332 task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
1333
1334 /*
1335 * A linear estimation of the "balanced" throttle rate. The theory is,
de1fff37 1336 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
be3ffa27
WF
1337 * dirty_rate will be measured to be (N * task_ratelimit). So the below
1338 * formula will yield the balanced rate limit (write_bw / N).
1339 *
1340 * Note that the expanded form is not a pure rate feedback:
1341 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
1342 * but also takes pos_ratio into account:
1343 * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
1344 *
1345 * (1) is not realistic because pos_ratio also takes part in balancing
1346 * the dirty rate. Consider the state
1347 * pos_ratio = 0.5 (3)
1348 * rate = 2 * (write_bw / N) (4)
1349 * If (1) is used, it will get stuck in that state! Because each dd will
1350 * be throttled at
1351 * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
1352 * yielding
1353 * dirty_rate = N * task_ratelimit = write_bw (6)
1354 * put (6) into (1) we get
1355 * rate_(i+1) = rate_(i) (7)
1356 *
1357 * So we end up using (2) to always keep
1358 * rate_(i+1) ~= (write_bw / N) (8)
1359 * regardless of the value of pos_ratio. As long as (8) is satisfied,
1360 * pos_ratio is able to drive itself to 1.0, which is not only where
1361 * the dirty count meet the setpoint, but also where the slope of
1362 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
1363 */
1364 balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
1365 dirty_rate | 1);
bdaac490
WF
1366 /*
1367 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
1368 */
1369 if (unlikely(balanced_dirty_ratelimit > write_bw))
1370 balanced_dirty_ratelimit = write_bw;
be3ffa27 1371
7381131c
WF
1372 /*
1373 * We could safely do this and return immediately:
1374 *
de1fff37 1375 * wb->dirty_ratelimit = balanced_dirty_ratelimit;
7381131c
WF
1376 *
1377 * However to get a more stable dirty_ratelimit, the below elaborated
331cbdee 1378 * code makes use of task_ratelimit to filter out singular points and
7381131c
WF
1379 * limit the step size.
1380 *
1381 * The below code essentially only uses the relative value of
1382 *
1383 * task_ratelimit - dirty_ratelimit
1384 * = (pos_ratio - 1) * dirty_ratelimit
1385 *
1386 * which reflects the direction and size of dirty position error.
1387 */
1388
1389 /*
1390 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
1391 * task_ratelimit is on the same side of dirty_ratelimit, too.
1392 * For example, when
1393 * - dirty_ratelimit > balanced_dirty_ratelimit
1394 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
1395 * lowering dirty_ratelimit will help meet both the position and rate
1396 * control targets. Otherwise, don't update dirty_ratelimit if it will
1397 * only help meet the rate target. After all, what the users ultimately
1398 * feel and care are stable dirty rate and small position error.
1399 *
1400 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
331cbdee 1401 * and filter out the singular points of balanced_dirty_ratelimit. Which
7381131c
WF
1402 * keeps jumping around randomly and can even leap far away at times
1403 * due to the small 200ms estimation period of dirty_rate (we want to
1404 * keep that period small to reduce time lags).
1405 */
1406 step = 0;
5a537485
MP
1407
1408 /*
de1fff37 1409 * For strictlimit case, calculations above were based on wb counters
a88a341a 1410 * and limits (starting from pos_ratio = wb_position_ratio() and up to
5a537485 1411 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
de1fff37
TH
1412 * Hence, to calculate "step" properly, we have to use wb_dirty as
1413 * "dirty" and wb_setpoint as "setpoint".
5a537485 1414 *
de1fff37
TH
1415 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
1416 * it's possible that wb_thresh is close to zero due to inactivity
970fb01a 1417 * of backing device.
5a537485 1418 */
a88a341a 1419 if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
2bc00aef
TH
1420 dirty = dtc->wb_dirty;
1421 if (dtc->wb_dirty < 8)
1422 setpoint = dtc->wb_dirty + 1;
5a537485 1423 else
970fb01a 1424 setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
5a537485
MP
1425 }
1426
7381131c 1427 if (dirty < setpoint) {
a88a341a 1428 x = min3(wb->balanced_dirty_ratelimit,
7c809968 1429 balanced_dirty_ratelimit, task_ratelimit);
7381131c
WF
1430 if (dirty_ratelimit < x)
1431 step = x - dirty_ratelimit;
1432 } else {
a88a341a 1433 x = max3(wb->balanced_dirty_ratelimit,
7c809968 1434 balanced_dirty_ratelimit, task_ratelimit);
7381131c
WF
1435 if (dirty_ratelimit > x)
1436 step = dirty_ratelimit - x;
1437 }
1438
1439 /*
1440 * Don't pursue 100% rate matching. It's impossible since the balanced
1441 * rate itself is constantly fluctuating. So decrease the track speed
1442 * when it gets close to the target. Helps eliminate pointless tremors.
1443 */
d59b1087
AR
1444 shift = dirty_ratelimit / (2 * step + 1);
1445 if (shift < BITS_PER_LONG)
1446 step = DIV_ROUND_UP(step >> shift, 8);
1447 else
1448 step = 0;
7381131c
WF
1449
1450 if (dirty_ratelimit < balanced_dirty_ratelimit)
1451 dirty_ratelimit += step;
1452 else
1453 dirty_ratelimit -= step;
1454
20792ebf 1455 WRITE_ONCE(wb->dirty_ratelimit, max(dirty_ratelimit, 1UL));
a88a341a 1456 wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
b48c104d 1457
5634cc2a 1458 trace_bdi_dirty_ratelimit(wb, dirty_rate, task_ratelimit);
be3ffa27
WF
1459}
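A worked instance of the rate feedback (all numbers assumed): with write_bw = 25,600 pages/s shared by two dd tasks, dirty_ratelimit = 20,000 pages/s and pos_ratio = 0.75, each task is throttled at task_ratelimit ≈ 15,000 pages/s and the measured dirty_rate settles near 30,000 pages/s, so balanced_dirty_ratelimit = 15,000 * 25,600 / 30,000 = 12,800 pages/s, exactly write_bw / N. dirty_ratelimit is then stepped toward that value, with the step further limited by |task_ratelimit - dirty_ratelimit| and damped as the two converge.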
1460
c2aa723a
TH
1461static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
1462 struct dirty_throttle_control *mdtc,
8a731799 1463 bool update_ratelimit)
e98be2d5 1464{
c2aa723a 1465 struct bdi_writeback *wb = gdtc->wb;
e98be2d5 1466 unsigned long now = jiffies;
45a2966f 1467 unsigned long elapsed;
be3ffa27 1468 unsigned long dirtied;
e98be2d5
WF
1469 unsigned long written;
1470
45a2966f 1471 spin_lock(&wb->list_lock);
8a731799 1472
e98be2d5 1473 /*
45a2966f
JK
1474 * Lockless checks for elapsed time are racy and delayed update after
1475 * IO completion doesn't do it at all (to make sure written pages are
1476 * accounted reasonably quickly). Make sure elapsed >= 1 to avoid
1477 * division errors.
e98be2d5 1478 */
45a2966f 1479 elapsed = max(now - wb->bw_time_stamp, 1UL);
a88a341a
TH
1480 dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
1481 written = percpu_counter_read(&wb->stat[WB_WRITTEN]);
e98be2d5 1482
8a731799 1483 if (update_ratelimit) {
42dd235c 1484 domain_update_dirty_limit(gdtc, now);
c2aa723a
TH
1485 wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);
1486
1487 /*
1488 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
1489 * compiler has no way to figure that out. Help it.
1490 */
1491 if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
42dd235c 1492 domain_update_dirty_limit(mdtc, now);
c2aa723a
TH
1493 wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
1494 }
be3ffa27 1495 }
a88a341a 1496 wb_update_write_bandwidth(wb, elapsed, written);
e98be2d5 1497
a88a341a
TH
1498 wb->dirtied_stamp = dirtied;
1499 wb->written_stamp = written;
20792ebf 1500 WRITE_ONCE(wb->bw_time_stamp, now);
45a2966f 1501 spin_unlock(&wb->list_lock);
e98be2d5
WF
1502}
1503
45a2966f 1504void wb_update_bandwidth(struct bdi_writeback *wb)
e98be2d5 1505{
2bc00aef
TH
1506 struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
1507
fee468fd 1508 __wb_update_bandwidth(&gdtc, NULL, false);
fee468fd
JK
1509}
1510
1511/* Interval after which we consider wb idle and don't estimate bandwidth */
1512#define WB_BANDWIDTH_IDLE_JIF (HZ)
1513
1514static void wb_bandwidth_estimate_start(struct bdi_writeback *wb)
1515{
1516 unsigned long now = jiffies;
1517 unsigned long elapsed = now - READ_ONCE(wb->bw_time_stamp);
1518
1519 if (elapsed > WB_BANDWIDTH_IDLE_JIF &&
1520 !atomic_read(&wb->writeback_inodes)) {
1521 spin_lock(&wb->list_lock);
1522 wb->dirtied_stamp = wb_stat(wb, WB_DIRTIED);
1523 wb->written_stamp = wb_stat(wb, WB_WRITTEN);
20792ebf 1524 WRITE_ONCE(wb->bw_time_stamp, now);
fee468fd
JK
1525 spin_unlock(&wb->list_lock);
1526 }
e98be2d5
WF
1527}
1528
9d823e8f 1529/*
d0e1d66b 1530 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
9d823e8f
WF
1531 * will look to see if it needs to start dirty throttling.
1532 *
1533 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
c41f012a 1534 * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
9d823e8f
WF
1535 * (the number of pages we may dirty without exceeding the dirty limits).
1536 */
1537static unsigned long dirty_poll_interval(unsigned long dirty,
1538 unsigned long thresh)
1539{
1540 if (thresh > dirty)
1541 return 1UL << (ilog2(thresh - dirty) >> 1);
1542
1543 return 1;
1544}
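The near-sqrt scaling keeps the poll interval roughly proportional to the square root of the remaining headroom: 1,000,000 spare pages give 1 << (ilog2(1,000,000) >> 1) = 1 << 9 = 512 pages between calls, 10,000 spare pages give 1 << 6 = 64 pages, and once dirty reaches thresh the interval collapses to a single page.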
1545
a88a341a 1546static unsigned long wb_max_pause(struct bdi_writeback *wb,
de1fff37 1547 unsigned long wb_dirty)
c8462cc9 1548{
20792ebf 1549 unsigned long bw = READ_ONCE(wb->avg_write_bandwidth);
e3b6c655 1550 unsigned long t;
c8462cc9 1551
7ccb9ad5
WF
1552 /*
1553 * Limit pause time for small memory systems. If sleeping for too long
1554 * time, a small pool of dirty/writeback pages may go empty and disk go
1555 * idle.
1556 *
1557 * 8 serves as the safety ratio.
1558 */
de1fff37 1559 t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
7ccb9ad5
WF
1560 t++;
1561
e3b6c655 1562 return min_t(unsigned long, t, MAX_PAUSE);
7ccb9ad5
WF
1563}
1564
a88a341a
TH
1565static long wb_min_pause(struct bdi_writeback *wb,
1566 long max_pause,
1567 unsigned long task_ratelimit,
1568 unsigned long dirty_ratelimit,
1569 int *nr_dirtied_pause)
c8462cc9 1570{
20792ebf
JK
1571 long hi = ilog2(READ_ONCE(wb->avg_write_bandwidth));
1572 long lo = ilog2(READ_ONCE(wb->dirty_ratelimit));
7ccb9ad5
WF
1573 long t; /* target pause */
1574 long pause; /* estimated next pause */
1575 int pages; /* target nr_dirtied_pause */
c8462cc9 1576
7ccb9ad5
WF
1577 /* target for 10ms pause on 1-dd case */
1578 t = max(1, HZ / 100);
c8462cc9
WF
1579
1580 /*
1581 * Scale up pause time for concurrent dirtiers in order to reduce CPU
1582 * overheads.
1583 *
7ccb9ad5 1584 * (N * 10ms) on 2^N concurrent tasks.
c8462cc9
WF
1585 */
1586 if (hi > lo)
7ccb9ad5 1587 t += (hi - lo) * (10 * HZ) / 1024;
c8462cc9
WF
1588
1589 /*
7ccb9ad5
WF
1590 * This is a bit convoluted. We try to base the next nr_dirtied_pause
1591 * on the much more stable dirty_ratelimit. However the next pause time
1592 * will be computed based on task_ratelimit and the two rate limits may
1593 * depart considerably at some time. Especially if task_ratelimit goes
1594 * below dirty_ratelimit/2 and the target pause is max_pause, the next
1595 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
1596 * result task_ratelimit won't be executed faithfully, which could
1597 * eventually bring down dirty_ratelimit.
c8462cc9 1598 *
7ccb9ad5
WF
1599 * We apply two rules to fix it up:
1600 * 1) try to estimate the next pause time and if necessary, use a lower
1601 * nr_dirtied_pause so as not to exceed max_pause. When this happens,
1602 * nr_dirtied_pause will be "dancing" with task_ratelimit.
1603 * 2) limit the target pause time to max_pause/2, so that the normal
1604 * small fluctuations of task_ratelimit won't trigger rule (1) and
1605 * nr_dirtied_pause will remain as stable as dirty_ratelimit.
c8462cc9 1606 */
7ccb9ad5
WF
1607 t = min(t, 1 + max_pause / 2);
1608 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
c8462cc9
WF
1609
1610 /*
5b9b3574
WF
1611 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1612 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1613 * When the 16 consecutive reads are often interrupted by some dirty
1614	 * throttling pause during the async writes, cfq will go idle
1615	 * (deadline is fine). So push nr_dirtied_pause as high as possible
1616	 * until it reaches DIRTY_POLL_THRESH=32 pages.
c8462cc9 1617 */
5b9b3574
WF
1618 if (pages < DIRTY_POLL_THRESH) {
1619 t = max_pause;
1620 pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1621 if (pages > DIRTY_POLL_THRESH) {
1622 pages = DIRTY_POLL_THRESH;
1623 t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1624 }
1625 }
1626
7ccb9ad5
WF
1627 pause = HZ * pages / (task_ratelimit + 1);
1628 if (pause > max_pause) {
1629 t = max_pause;
1630 pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1631 }
c8462cc9 1632
7ccb9ad5 1633 *nr_dirtied_pause = pages;
c8462cc9 1634 /*
7ccb9ad5 1635 * The minimal pause time will normally be half the target pause time.
c8462cc9 1636 */
5b9b3574 1637 return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
c8462cc9
WF
1638}
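/*
 * Worked example (a sketch assuming HZ == 1000, 4KiB pages, a single
 * dirtier and dirty_ratelimit ~= task_ratelimit ~= 4096 pages/s): the base
 * target is HZ / 100 == 10 jiffies; pages = 4096 * 10 / 1024 == 40, already
 * above DIRTY_POLL_THRESH; the estimated pause 1000 * 40 / 4097 == 9
 * jiffies stays below max_pause, so nr_dirtied_pause becomes 40 and the
 * returned minimum pause is 1 + 10 / 2 == 6 jiffies.
 */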
1639
970fb01a 1640static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
5a537485 1641{
2bc00aef 1642 struct bdi_writeback *wb = dtc->wb;
93f78d88 1643 unsigned long wb_reclaimable;
5a537485
MP
1644
1645 /*
de1fff37	1646	 * wb_thresh is not treated as a hard limit the way
5a537485	1647	 * dirty_thresh is, for two reasons:
de1fff37 1648 * - in JBOD setup, wb_thresh can fluctuate a lot
5a537485 1649 * - in a system with HDD and USB key, the USB key may somehow
de1fff37
TH
1650 * go into state (wb_dirty >> wb_thresh) either because
1651 * wb_dirty starts high, or because wb_thresh drops low.
5a537485 1652 * In this case we don't want to hard throttle the USB key
de1fff37
TH
1653 * dirtiers for 100 seconds until wb_dirty drops under
1654 * wb_thresh. Instead the auxiliary wb control line in
a88a341a 1655 * wb_position_ratio() will let the dirtier task progress
de1fff37 1656 * at some rate <= (write_bw / 2) for bringing down wb_dirty.
5a537485 1657 */
b1cbc6d4 1658 dtc->wb_thresh = __wb_calc_thresh(dtc);
970fb01a 1659 dtc->wb_bg_thresh = dtc->thresh ?
9319b647 1660 div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
5a537485
MP
1661
1662 /*
1663 * In order to avoid the stacked BDI deadlock we need
1664 * to ensure we accurately count the 'dirty' pages when
1665 * the threshold is low.
1666 *
1667 * Otherwise it would be possible to get thresh+n pages
1668 * reported dirty, even though there are thresh-m pages
1669 * actually dirty; with m+n sitting in the percpu
1670 * deltas.
1671 */
2bce774e 1672 if (dtc->wb_thresh < 2 * wb_stat_error()) {
93f78d88 1673 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2bc00aef 1674 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
5a537485 1675 } else {
93f78d88 1676 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2bc00aef 1677 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
5a537485
MP
1678 }
1679}
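/*
 * Example of the proportional split above (numbers for illustration only):
 * with thresh == 200 pages, bg_thresh == 100 and this wb's computed share
 * wb_thresh == 40, wb_bg_thresh becomes 40 * 100 / 200 == 20 pages.
 */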
1680
1da177e4
LT
1681/*
1682 * balance_dirty_pages() must be called by processes which are generating dirty
1683 * data. It looks at the number of dirty pages in the machine and will force
143dfe86	1684	 * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2.
5b0830cb
JA
1685 * If we're over `background_thresh' then the writeback threads are woken to
1686 * perform some writeout.
1da177e4 1687 */
fe6c9c6e
JK
1688static int balance_dirty_pages(struct bdi_writeback *wb,
1689 unsigned long pages_dirtied, unsigned int flags)
1da177e4 1690{
2bc00aef 1691 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
c2aa723a 1692 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2bc00aef 1693 struct dirty_throttle_control * const gdtc = &gdtc_stor;
c2aa723a
TH
1694 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1695 &mdtc_stor : NULL;
1696 struct dirty_throttle_control *sdtc;
826881a7 1697 unsigned long nr_dirty;
83712358 1698 long period;
7ccb9ad5
WF
1699 long pause;
1700 long max_pause;
1701 long min_pause;
1702 int nr_dirtied_pause;
e50e3720 1703 bool dirty_exceeded = false;
143dfe86 1704 unsigned long task_ratelimit;
7ccb9ad5 1705 unsigned long dirty_ratelimit;
dfb8ae56 1706 struct backing_dev_info *bdi = wb->bdi;
5a537485 1707 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
e98be2d5 1708 unsigned long start_time = jiffies;
fe6c9c6e 1709 int ret = 0;
1da177e4
LT
1710
1711 for (;;) {
83712358 1712 unsigned long now = jiffies;
2bc00aef 1713 unsigned long dirty, thresh, bg_thresh;
50e55bf6
YS
1714 unsigned long m_dirty = 0; /* stop bogus uninit warnings */
1715 unsigned long m_thresh = 0;
1716 unsigned long m_bg_thresh = 0;
83712358 1717
826881a7 1718 nr_dirty = global_node_page_state(NR_FILE_DIRTY);
9fc3a43e 1719 gdtc->avail = global_dirtyable_memory();
826881a7 1720 gdtc->dirty = nr_dirty + global_node_page_state(NR_WRITEBACK);
5fce25a9 1721
9fc3a43e 1722 domain_dirty_limits(gdtc);
16c4042f 1723
5a537485 1724 if (unlikely(strictlimit)) {
970fb01a 1725 wb_dirty_limits(gdtc);
5a537485 1726
2bc00aef
TH
1727 dirty = gdtc->wb_dirty;
1728 thresh = gdtc->wb_thresh;
970fb01a 1729 bg_thresh = gdtc->wb_bg_thresh;
5a537485 1730 } else {
2bc00aef
TH
1731 dirty = gdtc->dirty;
1732 thresh = gdtc->thresh;
1733 bg_thresh = gdtc->bg_thresh;
5a537485
MP
1734 }
1735
c2aa723a 1736 if (mdtc) {
c5edf9cd 1737 unsigned long filepages, headroom, writeback;
c2aa723a
TH
1738
1739 /*
1740 * If @wb belongs to !root memcg, repeat the same
1741 * basic calculations for the memcg domain.
1742 */
c5edf9cd
TH
1743 mem_cgroup_wb_stats(wb, &filepages, &headroom,
1744 &mdtc->dirty, &writeback);
c2aa723a 1745 mdtc->dirty += writeback;
c5edf9cd 1746 mdtc_calc_avail(mdtc, filepages, headroom);
c2aa723a
TH
1747
1748 domain_dirty_limits(mdtc);
1749
1750 if (unlikely(strictlimit)) {
1751 wb_dirty_limits(mdtc);
1752 m_dirty = mdtc->wb_dirty;
1753 m_thresh = mdtc->wb_thresh;
1754 m_bg_thresh = mdtc->wb_bg_thresh;
1755 } else {
1756 m_dirty = mdtc->dirty;
1757 m_thresh = mdtc->thresh;
1758 m_bg_thresh = mdtc->bg_thresh;
1759 }
5a537485
MP
1760 }
1761
ea6813be
JK
1762 /*
1763 * In laptop mode, we wait until hitting the higher threshold
1764 * before starting background writeout, and then write out all
1765 * the way down to the lower threshold. So slow writers cause
1766 * minimal disk activity.
1767 *
1768 * In normal mode, we start background writeout at the lower
1769 * background_thresh, to keep the amount of dirty memory low.
1770 */
826881a7 1771 if (!laptop_mode && nr_dirty > gdtc->bg_thresh &&
ea6813be
JK
1772 !writeback_in_progress(wb))
1773 wb_start_background_writeback(wb);
1774
16c4042f
WF
1775 /*
1776 * Throttle it only when the background writeback cannot
1777	 * catch up. This avoids (excessively) small writeouts
de1fff37 1778 * when the wb limits are ramping up in case of !strictlimit.
5a537485 1779 *
de1fff37
TH
1780 * In strictlimit case make decision based on the wb counters
1781 * and limits. Small writeouts when the wb limits are ramping
5a537485 1782 * up are the price we consciously pay for strictlimit-ing.
c2aa723a
TH
1783 *
1784 * If memcg domain is in effect, @dirty should be under
1785 * both global and memcg freerun ceilings.
16c4042f 1786 */
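		/*
		 * For reference, dirty_freerun_ceiling() is simply the
		 * midpoint (thresh + bg_thresh) / 2: with e.g. thresh == 200
		 * and bg_thresh == 100 pages, tasks run free of throttling
		 * while dirty stays at or below 150 pages.
		 */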
c2aa723a
TH
1787 if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1788 (!mdtc ||
1789 m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
a37b0715
N
1790 unsigned long intv;
1791 unsigned long m_intv;
1792
1793free_running:
1794 intv = dirty_poll_interval(dirty, thresh);
1795 m_intv = ULONG_MAX;
c2aa723a 1796
83712358
WF
1797 current->dirty_paused_when = now;
1798 current->nr_dirtied = 0;
c2aa723a
TH
1799 if (mdtc)
1800 m_intv = dirty_poll_interval(m_dirty, m_thresh);
1801 current->nr_dirtied_pause = min(intv, m_intv);
16c4042f 1802 break;
83712358 1803 }
16c4042f 1804
ea6813be 1805 /* Start writeback even when in laptop mode */
bc05873d 1806 if (unlikely(!writeback_in_progress(wb)))
9ecf4866 1807 wb_start_background_writeback(wb);
143dfe86 1808
97b27821
TH
1809 mem_cgroup_flush_foreign(wb);
1810
c2aa723a
TH
1811 /*
1812 * Calculate global domain's pos_ratio and select the
1813 * global dtc by default.
1814 */
a37b0715 1815 if (!strictlimit) {
970fb01a 1816 wb_dirty_limits(gdtc);
5fce25a9 1817
a37b0715
N
1818 if ((current->flags & PF_LOCAL_THROTTLE) &&
1819 gdtc->wb_dirty <
1820 dirty_freerun_ceiling(gdtc->wb_thresh,
1821 gdtc->wb_bg_thresh))
1822 /*
1823 * LOCAL_THROTTLE tasks must not be throttled
1824 * when below the per-wb freerun ceiling.
1825 */
1826 goto free_running;
1827 }
1828
2bc00aef
TH
1829 dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1830 ((gdtc->dirty > gdtc->thresh) || strictlimit);
daddfa3c
TH
1831
1832 wb_position_ratio(gdtc);
c2aa723a
TH
1833 sdtc = gdtc;
1834
1835 if (mdtc) {
1836 /*
1837 * If memcg domain is in effect, calculate its
1838 * pos_ratio. @wb should satisfy constraints from
1839 * both global and memcg domains. Choose the one
1840 * w/ lower pos_ratio.
1841 */
a37b0715 1842 if (!strictlimit) {
c2aa723a
TH
1843 wb_dirty_limits(mdtc);
1844
a37b0715
N
1845 if ((current->flags & PF_LOCAL_THROTTLE) &&
1846 mdtc->wb_dirty <
1847 dirty_freerun_ceiling(mdtc->wb_thresh,
1848 mdtc->wb_bg_thresh))
1849 /*
1850 * LOCAL_THROTTLE tasks must not be
1851 * throttled when below the per-wb
1852 * freerun ceiling.
1853 */
1854 goto free_running;
1855 }
c2aa723a
TH
1856 dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1857 ((mdtc->dirty > mdtc->thresh) || strictlimit);
1858
1859 wb_position_ratio(mdtc);
1860 if (mdtc->pos_ratio < gdtc->pos_ratio)
1861 sdtc = mdtc;
1862 }
daddfa3c 1863
e92eebbb
JK
1864 if (dirty_exceeded != wb->dirty_exceeded)
1865 wb->dirty_exceeded = dirty_exceeded;
1da177e4 1866
20792ebf 1867 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
45a2966f 1868 BANDWIDTH_INTERVAL))
fee468fd 1869 __wb_update_bandwidth(gdtc, mdtc, true);
e98be2d5 1870
c2aa723a 1871 /* throttle according to the chosen dtc */
20792ebf 1872 dirty_ratelimit = READ_ONCE(wb->dirty_ratelimit);
c2aa723a 1873 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
3a73dbbc 1874 RATELIMIT_CALC_SHIFT;
c2aa723a 1875 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
a88a341a
TH
1876 min_pause = wb_min_pause(wb, max_pause,
1877 task_ratelimit, dirty_ratelimit,
1878 &nr_dirtied_pause);
7ccb9ad5 1879
3a73dbbc 1880 if (unlikely(task_ratelimit == 0)) {
83712358 1881 period = max_pause;
c8462cc9 1882 pause = max_pause;
143dfe86 1883 goto pause;
04fbfdc1 1884 }
83712358
WF
1885 period = HZ * pages_dirtied / task_ratelimit;
1886 pause = period;
1887 if (current->dirty_paused_when)
1888 pause -= now - current->dirty_paused_when;
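		/*
		 * Example of the sums above (assuming HZ == 1000): with
		 * task_ratelimit == 2560 pages/s and pages_dirtied == 32,
		 * period is 1000 * 32 / 2560 == 12 jiffies; if the task
		 * last slept 5 jiffies ago (its think time), only 7 jiffies
		 * of pause remain to be served now.
		 */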
1889 /*
1890 * For less than 1s think time (ext3/4 may block the dirtier
1891 * for up to 800ms from time to time on 1-HDD; so does xfs,
1892	 * however much less frequently), try to compensate for it in
1893 * future periods by updating the virtual time; otherwise just
1894 * do a reset, as it may be a light dirtier.
1895 */
7ccb9ad5 1896 if (pause < min_pause) {
5634cc2a 1897 trace_balance_dirty_pages(wb,
c2aa723a
TH
1898 sdtc->thresh,
1899 sdtc->bg_thresh,
1900 sdtc->dirty,
1901 sdtc->wb_thresh,
1902 sdtc->wb_dirty,
ece13ac3
WF
1903 dirty_ratelimit,
1904 task_ratelimit,
1905 pages_dirtied,
83712358 1906 period,
7ccb9ad5 1907 min(pause, 0L),
ece13ac3 1908 start_time);
83712358
WF
1909 if (pause < -HZ) {
1910 current->dirty_paused_when = now;
1911 current->nr_dirtied = 0;
1912 } else if (period) {
1913 current->dirty_paused_when += period;
1914 current->nr_dirtied = 0;
7ccb9ad5
WF
1915 } else if (current->nr_dirtied_pause <= pages_dirtied)
1916 current->nr_dirtied_pause += pages_dirtied;
57fc978c 1917 break;
04fbfdc1 1918 }
7ccb9ad5
WF
1919 if (unlikely(pause > max_pause)) {
1920 /* for occasional dropped task_ratelimit */
1921 now += min(pause - max_pause, max_pause);
1922 pause = max_pause;
1923 }
143dfe86
WF
1924
1925pause:
5634cc2a 1926 trace_balance_dirty_pages(wb,
c2aa723a
TH
1927 sdtc->thresh,
1928 sdtc->bg_thresh,
1929 sdtc->dirty,
1930 sdtc->wb_thresh,
1931 sdtc->wb_dirty,
ece13ac3
WF
1932 dirty_ratelimit,
1933 task_ratelimit,
1934 pages_dirtied,
83712358 1935 period,
ece13ac3
WF
1936 pause,
1937 start_time);
fe6c9c6e
JK
1938 if (flags & BDP_ASYNC) {
1939 ret = -EAGAIN;
1940 break;
1941 }
499d05ec 1942 __set_current_state(TASK_KILLABLE);
f814bdda 1943 bdi->last_bdp_sleep = jiffies;
d25105e8 1944 io_schedule_timeout(pause);
87c6a9b2 1945
83712358
WF
1946 current->dirty_paused_when = now + pause;
1947 current->nr_dirtied = 0;
7ccb9ad5 1948 current->nr_dirtied_pause = nr_dirtied_pause;
83712358 1949
ffd1f609 1950 /*
2bc00aef
TH
1951 * This is typically equal to (dirty < thresh) and can also
1952 * keep "1000+ dd on a slow USB stick" under control.
ffd1f609 1953 */
1df64719 1954 if (task_ratelimit)
ffd1f609 1955 break;
499d05ec 1956
c5c6343c 1957 /*
f0953a1b	1958	 * In the case of an unresponsive NFS server whose dirty
de1fff37	1959	 * pages exceed dirty_thresh, give the other good wb's a pipe
c5c6343c
WF
1960 * to go through, so that tasks on them still remain responsive.
1961 *
3f8b6fb7 1962 * In theory 1 page is enough to keep the consumer-producer
c5c6343c 1963 * pipe going: the flusher cleans 1 page => the task dirties 1
de1fff37 1964 * more page. However wb_dirty has accounting errors. So use
93f78d88 1965 * the larger and more IO friendly wb_stat_error.
c5c6343c 1966 */
2bce774e 1967 if (sdtc->wb_dirty <= wb_stat_error())
c5c6343c
WF
1968 break;
1969
499d05ec
JK
1970 if (fatal_signal_pending(current))
1971 break;
1da177e4 1972 }
fe6c9c6e 1973 return ret;
1da177e4
LT
1974}
1975
9d823e8f 1976static DEFINE_PER_CPU(int, bdp_ratelimits);
245b2e70 1977
54848d73
WF
1978/*
1979 * Normal tasks are throttled by
1980 * loop {
1981 * dirty tsk->nr_dirtied_pause pages;
1982 * take a snap in balance_dirty_pages();
1983 * }
1984	 * However there is a worst case. If every task exits immediately after dirtying
1985 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1986 * called to throttle the page dirties. The solution is to save the not yet
1987 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1988 * randomly into the running tasks. This works well for the above worst case,
1989 * as the new task will pick up and accumulate the old task's leaked dirty
1990 * count and eventually get throttled.
1991 */
1992DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
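/*
 * For illustration: a kernel build spawns thousands of short-lived compiler
 * processes, each dirtying a handful of pages and exiting before reaching
 * nr_dirtied_pause. Their un-throttled dirties are parked in
 * dirty_throttle_leaks and charged to whichever long-running tasks dirty
 * pages next, so the aggregate dirty rate is still throttled.
 */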
1993
1da177e4 1994/**
fe6c9c6e
JK
1995 * balance_dirty_pages_ratelimited_flags - Balance dirty memory state.
1996 * @mapping: address_space which was dirtied.
1997 * @flags: BDP flags.
1da177e4
LT
1998 *
1999 * Processes which are dirtying memory should call in here once for each page
2000 * which was newly dirtied. The function will periodically check the system's
2001 * dirty state and will initiate writeback if needed.
2002 *
fe6c9c6e
JK
2003 * See balance_dirty_pages_ratelimited() for details.
2004 *
2005 * Return: If @flags contains BDP_ASYNC, it may return -EAGAIN to
2006 * indicate that memory is out of balance and the caller must wait
2007 * for I/O to complete. Otherwise, it will return 0 to indicate
2008 * that either memory was already in balance, or it was able to sleep
2009 * until the amount of dirty memory returned to balance.
1da177e4 2010 */
fe6c9c6e
JK
2011int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
2012 unsigned int flags)
1da177e4 2013{
dfb8ae56
TH
2014 struct inode *inode = mapping->host;
2015 struct backing_dev_info *bdi = inode_to_bdi(inode);
2016 struct bdi_writeback *wb = NULL;
9d823e8f 2017 int ratelimit;
fe6c9c6e 2018 int ret = 0;
9d823e8f 2019 int *p;
1da177e4 2020
f56753ac 2021 if (!(bdi->capabilities & BDI_CAP_WRITEBACK))
fe6c9c6e 2022 return ret;
36715cef 2023
dfb8ae56
TH
2024 if (inode_cgwb_enabled(inode))
2025 wb = wb_get_create_current(bdi, GFP_KERNEL);
2026 if (!wb)
2027 wb = &bdi->wb;
2028
9d823e8f 2029 ratelimit = current->nr_dirtied_pause;
a88a341a 2030 if (wb->dirty_exceeded)
9d823e8f
WF
2031 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
2032
9d823e8f 2033 preempt_disable();
1da177e4 2034 /*
9d823e8f
WF
2035	 * This prevents one CPU from accumulating too many dirtied pages without
2036	 * calling into balance_dirty_pages(), which can happen when there are
2037	 * 1000+ tasks that all start dirtying pages at exactly the same
2038	 * time and hence all honour an overly large initial task->nr_dirtied_pause.
1da177e4 2039 */
7c8e0181 2040 p = this_cpu_ptr(&bdp_ratelimits);
9d823e8f 2041 if (unlikely(current->nr_dirtied >= ratelimit))
fa5a734e 2042 *p = 0;
d3bc1fef
WF
2043 else if (unlikely(*p >= ratelimit_pages)) {
2044 *p = 0;
2045 ratelimit = 0;
1da177e4 2046 }
54848d73
WF
2047 /*
2048	 * Pick up the pages dirtied by exited tasks. This prevents lots of
2049	 * short-lived tasks (e.g. gcc invocations in a kernel build) from escaping
2050	 * the dirty throttling and livelocking other long-running dirtiers.
2051 */
7c8e0181 2052 p = this_cpu_ptr(&dirty_throttle_leaks);
54848d73 2053 if (*p > 0 && current->nr_dirtied < ratelimit) {
d0e1d66b 2054 unsigned long nr_pages_dirtied;
54848d73
WF
2055 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
2056 *p -= nr_pages_dirtied;
2057 current->nr_dirtied += nr_pages_dirtied;
1da177e4 2058 }
fa5a734e 2059 preempt_enable();
9d823e8f
WF
2060
2061 if (unlikely(current->nr_dirtied >= ratelimit))
fe6c9c6e 2062 ret = balance_dirty_pages(wb, current->nr_dirtied, flags);
dfb8ae56
TH
2063
2064 wb_put(wb);
fe6c9c6e
JK
2065 return ret;
2066}
611df5d6 2067EXPORT_SYMBOL_GPL(balance_dirty_pages_ratelimited_flags);
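/*
 * A minimal sketch of a non-blocking caller (the real call sites live in
 * the filesystems; "written" here is just a hypothetical byte count):
 *
 *	ret = balance_dirty_pages_ratelimited_flags(mapping, BDP_ASYNC);
 *	if (ret == -EAGAIN)
 *		return written ? written : ret;
 *
 * A caller that may sleep passes flags == 0 and can ignore the return value.
 */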
fe6c9c6e
JK
2068
2069/**
2070 * balance_dirty_pages_ratelimited - balance dirty memory state.
2071 * @mapping: address_space which was dirtied.
2072 *
2073 * Processes which are dirtying memory should call in here once for each page
2074 * which was newly dirtied. The function will periodically check the system's
2075 * dirty state and will initiate writeback if needed.
2076 *
2077 * Once we're over the dirty memory limit we decrease the ratelimiting
2078 * by a lot, to prevent individual processes from overshooting the limit
2079 * by (ratelimit_pages) each.
2080 */
2081void balance_dirty_pages_ratelimited(struct address_space *mapping)
2082{
2083 balance_dirty_pages_ratelimited_flags(mapping, 0);
1da177e4 2084}
d0e1d66b 2085EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
1da177e4 2086
aa661bbe
TH
2087/**
2088 * wb_over_bg_thresh - does @wb need to be written back?
2089 * @wb: bdi_writeback of interest
2090 *
2091 * Determines whether background writeback should keep writing @wb or it's
a862f68a
MR
2092 * clean enough.
2093 *
2094 * Return: %true if writeback should continue.
aa661bbe
TH
2095 */
2096bool wb_over_bg_thresh(struct bdi_writeback *wb)
2097{
947e9762 2098 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
c2aa723a 2099 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
947e9762 2100 struct dirty_throttle_control * const gdtc = &gdtc_stor;
c2aa723a
TH
2101 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
2102 &mdtc_stor : NULL;
ab19939a
CW
2103 unsigned long reclaimable;
2104 unsigned long thresh;
aa661bbe 2105
947e9762
TH
2106 /*
2107 * Similar to balance_dirty_pages() but ignores pages being written
2108 * as we're trying to decide whether to put more under writeback.
2109 */
2110 gdtc->avail = global_dirtyable_memory();
8d92890b 2111 gdtc->dirty = global_node_page_state(NR_FILE_DIRTY);
947e9762 2112 domain_dirty_limits(gdtc);
aa661bbe 2113
947e9762 2114 if (gdtc->dirty > gdtc->bg_thresh)
aa661bbe
TH
2115 return true;
2116
ab19939a
CW
2117 thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
2118 if (thresh < 2 * wb_stat_error())
2119 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2120 else
2121 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2122
2123 if (reclaimable > thresh)
aa661bbe
TH
2124 return true;
2125
c2aa723a 2126 if (mdtc) {
c5edf9cd 2127 unsigned long filepages, headroom, writeback;
c2aa723a 2128
c5edf9cd
TH
2129 mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty,
2130 &writeback);
2131 mdtc_calc_avail(mdtc, filepages, headroom);
c2aa723a
TH
2132 domain_dirty_limits(mdtc); /* ditto, ignore writeback */
2133
2134 if (mdtc->dirty > mdtc->bg_thresh)
2135 return true;
2136
ab19939a
CW
2137 thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
2138 if (thresh < 2 * wb_stat_error())
2139 reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2140 else
2141 reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2142
2143 if (reclaimable > thresh)
c2aa723a
TH
2144 return true;
2145 }
2146
aa661bbe
TH
2147 return false;
2148}
2149
aa779e51 2150#ifdef CONFIG_SYSCTL
1da177e4
LT
2151/*
2152 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
2153 */
aa779e51 2154static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
32927393 2155 void *buffer, size_t *length, loff_t *ppos)
1da177e4 2156{
94af5846
YS
2157 unsigned int old_interval = dirty_writeback_interval;
2158 int ret;
2159
2160 ret = proc_dointvec(table, write, buffer, length, ppos);
515c24c1
YS
2161
2162 /*
2163 * Writing 0 to dirty_writeback_interval will disable periodic writeback
2164	 * and a different non-zero value will wake up the writeback threads.
2165 * wb_wakeup_delayed() would be more appropriate, but it's a pain to
2166 * iterate over all bdis and wbs.
2167 * The reason we do this is to make the change take effect immediately.
2168 */
2169 if (!ret && write && dirty_writeback_interval &&
2170 dirty_writeback_interval != old_interval)
94af5846
YS
2171 wakeup_flusher_threads(WB_REASON_PERIODIC);
2172
2173 return ret;
1da177e4 2174}
aa779e51 2175#endif
1da177e4 2176
bca237a5 2177void laptop_mode_timer_fn(struct timer_list *t)
1da177e4 2178{
bca237a5
KC
2179 struct backing_dev_info *backing_dev_info =
2180 from_timer(backing_dev_info, t, laptop_mode_wb_timer);
1da177e4 2181
bca237a5 2182 wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
1da177e4
LT
2183}
2184
2185/*
2186 * We've spun up the disk and we're in laptop mode: schedule writeback
2187 * of all dirty data a few seconds from now. If the flush is already scheduled
2188 * then push it back - the user is still using the disk.
2189 */
31373d09 2190void laptop_io_completion(struct backing_dev_info *info)
1da177e4 2191{
31373d09 2192 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1da177e4
LT
2193}
2194
2195/*
2196 * We're in laptop mode and we've just synced. The sync's writes will have
2197 * caused another writeback to be scheduled by laptop_io_completion.
2198 * Nothing needs to be written back anymore, so we unschedule the writeback.
2199 */
2200void laptop_sync_completion(void)
2201{
31373d09
MG
2202 struct backing_dev_info *bdi;
2203
2204 rcu_read_lock();
2205
2206 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2207 del_timer(&bdi->laptop_mode_wb_timer);
2208
2209 rcu_read_unlock();
1da177e4
LT
2210}
2211
2212/*
2213 * If ratelimit_pages is too high then we can get into dirty-data overload
2214 * if a large number of processes all perform writes at the same time.
1da177e4
LT
2215 *
2216 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2217 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
9d823e8f 2218 * thresholds.
1da177e4
LT
2219 */
2220
2d1d43f6 2221void writeback_set_ratelimit(void)
1da177e4 2222{
dcc25ae7 2223 struct wb_domain *dom = &global_wb_domain;
9d823e8f
WF
2224 unsigned long background_thresh;
2225 unsigned long dirty_thresh;
dcc25ae7 2226
9d823e8f 2227 global_dirty_limits(&background_thresh, &dirty_thresh);
dcc25ae7 2228 dom->dirty_limit = dirty_thresh;
9d823e8f 2229 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1da177e4
LT
2230 if (ratelimit_pages < 16)
2231 ratelimit_pages = 16;
1da177e4
LT
2232}
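/*
 * Worked example (illustrative numbers, 4KiB pages): with a dirty_thresh of
 * 819200 pages (3200 MiB) on an 8-CPU machine, ratelimit_pages becomes
 * 819200 / (8 * 32) == 3200, i.e. each CPU may dirty roughly 12.5 MiB
 * between forced balance_dirty_pages() checks.
 */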
2233
1d7ac6ae 2234static int page_writeback_cpu_online(unsigned int cpu)
1da177e4 2235{
1d7ac6ae
SAS
2236 writeback_set_ratelimit();
2237 return 0;
1da177e4
LT
2238}
2239
aa779e51 2240#ifdef CONFIG_SYSCTL
3c6a4cba
LC
2241
2242/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
2243static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
2244
aa779e51 2245static struct ctl_table vm_page_writeback_sysctls[] = {
2246 {
2247 .procname = "dirty_background_ratio",
2248 .data = &dirty_background_ratio,
2249 .maxlen = sizeof(dirty_background_ratio),
2250 .mode = 0644,
2251 .proc_handler = dirty_background_ratio_handler,
2252 .extra1 = SYSCTL_ZERO,
2253 .extra2 = SYSCTL_ONE_HUNDRED,
2254 },
2255 {
2256 .procname = "dirty_background_bytes",
2257 .data = &dirty_background_bytes,
2258 .maxlen = sizeof(dirty_background_bytes),
2259 .mode = 0644,
2260 .proc_handler = dirty_background_bytes_handler,
2261 .extra1 = SYSCTL_LONG_ONE,
2262 },
2263 {
2264 .procname = "dirty_ratio",
2265 .data = &vm_dirty_ratio,
2266 .maxlen = sizeof(vm_dirty_ratio),
2267 .mode = 0644,
2268 .proc_handler = dirty_ratio_handler,
2269 .extra1 = SYSCTL_ZERO,
2270 .extra2 = SYSCTL_ONE_HUNDRED,
2271 },
2272 {
2273 .procname = "dirty_bytes",
2274 .data = &vm_dirty_bytes,
2275 .maxlen = sizeof(vm_dirty_bytes),
2276 .mode = 0644,
2277 .proc_handler = dirty_bytes_handler,
2278 .extra1 = (void *)&dirty_bytes_min,
2279 },
2280 {
2281 .procname = "dirty_writeback_centisecs",
2282 .data = &dirty_writeback_interval,
2283 .maxlen = sizeof(dirty_writeback_interval),
2284 .mode = 0644,
2285 .proc_handler = dirty_writeback_centisecs_handler,
2286 },
2287 {
2288 .procname = "dirty_expire_centisecs",
2289 .data = &dirty_expire_interval,
2290 .maxlen = sizeof(dirty_expire_interval),
2291 .mode = 0644,
2292 .proc_handler = proc_dointvec_minmax,
2293 .extra1 = SYSCTL_ZERO,
2294 },
2295#ifdef CONFIG_HIGHMEM
2296 {
2297 .procname = "highmem_is_dirtyable",
2298 .data = &vm_highmem_is_dirtyable,
2299 .maxlen = sizeof(vm_highmem_is_dirtyable),
2300 .mode = 0644,
2301 .proc_handler = proc_dointvec_minmax,
2302 .extra1 = SYSCTL_ZERO,
2303 .extra2 = SYSCTL_ONE,
2304 },
2305#endif
2306 {
2307 .procname = "laptop_mode",
2308 .data = &laptop_mode,
2309 .maxlen = sizeof(laptop_mode),
2310 .mode = 0644,
2311 .proc_handler = proc_dointvec_jiffies,
2312 },
aa779e51 2313};
2314#endif
2315
1da177e4 2316/*
dc6e29da
LT
2317 * Called early on to tune the page writeback dirty limits.
2318 *
2319 * We used to scale dirty pages according to how total memory
0a18e607 2320 * related to pages that could be allocated for buffers.
dc6e29da
LT
2321 *
2322 * However, that was when we used "dirty_ratio" to scale with
2323 * all memory, and we don't do that any more. "dirty_ratio"
0a18e607	2324	 * is now applied to total non-HIGHMEM memory, and as such we can't
dc6e29da
LT
2325 * get into the old insane situation any more where we had
2326 * large amounts of dirty pages compared to a small amount of
2327 * non-HIGHMEM memory.
2328 *
2329 * But we might still want to scale the dirty_ratio by how
2330 * much memory the box has..
1da177e4
LT
2331 */
2332void __init page_writeback_init(void)
2333{
a50fcb51
RV
2334 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2335
1d7ac6ae
SAS
2336 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/writeback:online",
2337 page_writeback_cpu_online, NULL);
2338 cpuhp_setup_state(CPUHP_MM_WRITEBACK_DEAD, "mm/writeback:dead", NULL,
2339 page_writeback_cpu_online);
aa779e51 2340#ifdef CONFIG_SYSCTL
2341 register_sysctl_init("vm", vm_page_writeback_sysctls);
2342#endif
1da177e4
LT
2343}
2344
f446daae 2345/**
cdc150b5 2346 * tag_pages_for_writeback - tag pages to be written by writeback
f446daae
JK
2347 * @mapping: address space structure to write
2348 * @start: starting page index
2349 * @end: ending page index (inclusive)
2350 *
2351 * This function scans the page range from @start to @end (inclusive) and tags
cdc150b5
CH
2352 * all pages that have DIRTY tag set with a special TOWRITE tag. The caller
2353 * can then use the TOWRITE tag to identify pages eligible for writeback.
2354 * This mechanism is used to avoid livelocking of writeback by a process
2355 * steadily creating new dirty pages in the file (thus it is important for this
2356 * function to be quick so that it can tag pages faster than a dirtying process
2357 * can create them).
f446daae 2358 */
f446daae
JK
2359void tag_pages_for_writeback(struct address_space *mapping,
2360 pgoff_t start, pgoff_t end)
2361{
ff9c745b
MW
2362 XA_STATE(xas, &mapping->i_pages, start);
2363 unsigned int tagged = 0;
2364 void *page;
268f42de 2365
ff9c745b
MW
2366 xas_lock_irq(&xas);
2367 xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
2368 xas_set_mark(&xas, PAGECACHE_TAG_TOWRITE);
2369 if (++tagged % XA_CHECK_SCHED)
268f42de 2370 continue;
ff9c745b
MW
2371
2372 xas_pause(&xas);
2373 xas_unlock_irq(&xas);
f446daae 2374 cond_resched();
ff9c745b 2375 xas_lock_irq(&xas);
268f42de 2376 }
ff9c745b 2377 xas_unlock_irq(&xas);
f446daae
JK
2378}
2379EXPORT_SYMBOL(tag_pages_for_writeback);
2380
b1793929
MWO
2381static bool folio_prepare_writeback(struct address_space *mapping,
2382 struct writeback_control *wbc, struct folio *folio)
2383{
2384 /*
2385 * Folio truncated or invalidated. We can freely skip it then,
2386 * even for data integrity operations: the folio has disappeared
2387 * concurrently, so there could be no real expectation of this
2388 * data integrity operation even if there is now a new, dirty
2389 * folio at the same pagecache index.
2390 */
2391 if (unlikely(folio->mapping != mapping))
2392 return false;
2393
2394 /*
2395 * Did somebody else write it for us?
2396 */
2397 if (!folio_test_dirty(folio))
2398 return false;
2399
2400 if (folio_test_writeback(folio)) {
2401 if (wbc->sync_mode == WB_SYNC_NONE)
2402 return false;
2403 folio_wait_writeback(folio);
2404 }
2405 BUG_ON(folio_test_writeback(folio));
2406
2407 if (!folio_clear_dirty_for_io(folio))
2408 return false;
2409
2410 return true;
2411}
2412
751e0d55
MWO
2413static xa_mark_t wbc_to_tag(struct writeback_control *wbc)
2414{
2415 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2416 return PAGECACHE_TAG_TOWRITE;
2417 return PAGECACHE_TAG_DIRTY;
2418}
2419
2420static pgoff_t wbc_end(struct writeback_control *wbc)
2421{
2422 if (wbc->range_cyclic)
2423 return -1;
2424 return wbc->range_end >> PAGE_SHIFT;
2425}
2426
e6d0ab87 2427static struct folio *writeback_get_folio(struct address_space *mapping,
751e0d55
MWO
2428 struct writeback_control *wbc)
2429{
e6d0ab87
MWO
2430 struct folio *folio;
2431
a2cbc136 2432retry:
e6d0ab87
MWO
2433 folio = folio_batch_next(&wbc->fbatch);
2434 if (!folio) {
2435 folio_batch_release(&wbc->fbatch);
2436 cond_resched();
2437 filemap_get_folios_tag(mapping, &wbc->index, wbc_end(wbc),
2438 wbc_to_tag(wbc), &wbc->fbatch);
2439 folio = folio_batch_next(&wbc->fbatch);
a2cbc136
MWO
2440 if (!folio)
2441 return NULL;
2442 }
2443
2444 folio_lock(folio);
2445 if (unlikely(!folio_prepare_writeback(mapping, wbc, folio))) {
2446 folio_unlock(folio);
2447 goto retry;
e6d0ab87
MWO
2448 }
2449
a2cbc136 2450 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
e6d0ab87 2451 return folio;
751e0d55
MWO
2452}
2453
811d736f 2454/**
cdc150b5 2455 * writeback_iter - iterate folio of a mapping for writeback
811d736f 2456 * @mapping: address space structure to write
cdc150b5
CH
2457 * @wbc: writeback context
2458 * @folio: previously iterated folio (%NULL to start)
2459 * @error: in-out pointer for writeback errors (see below)
811d736f 2460 *
cdc150b5
CH
2461 * This function returns the next folio for the writeback operation described by
2462 * @wbc on @mapping and should be called in a while loop in the ->writepages
2463 * implementation.
f446daae 2464 *
cdc150b5
CH
2465 * To start the writeback operation, %NULL is passed in the @folio argument, and
2466 * for every subsequent iteration the folio returned previously should be passed
2467 * back in.
64081362 2468 *
cdc150b5
CH
2469 * If there was an error in the per-folio writeback inside the writeback_iter()
2470 * loop, @error should be set to the error value.
a862f68a 2471 *
cdc150b5
CH
2472 * Once the writeback described in @wbc has finished, this function will return
2473 * %NULL and if there was an error in any iteration restore it to @error.
2474 *
2475 * Note: callers should not manually break out of the loop using break or goto
2476 * but must keep calling writeback_iter() until it returns %NULL.
2477 *
2478 * Return: the folio to write or %NULL if the loop is done.
811d736f 2479 */
cdc150b5
CH
2480struct folio *writeback_iter(struct address_space *mapping,
2481 struct writeback_control *wbc, struct folio *folio, int *error)
811d736f 2482{
cdc150b5
CH
2483 if (!folio) {
2484 folio_batch_init(&wbc->fbatch);
2485 wbc->saved_err = *error = 0;
751e0d55 2486
cdc150b5
CH
2487 /*
2488 * For range cyclic writeback we remember where we stopped so
2489 * that we can continue where we stopped.
2490 *
2491 * For non-cyclic writeback we always start at the beginning of
2492 * the passed in range.
2493 */
2494 if (wbc->range_cyclic)
2495 wbc->index = mapping->writeback_index;
2496 else
2497 wbc->index = wbc->range_start >> PAGE_SHIFT;
811d736f 2498
cdc150b5
CH
2499 /*
2500 * To avoid livelocks when other processes dirty new pages, we
2501 * first tag pages which should be written back and only then
2502 * start writing them.
2503 *
2504 * For data-integrity writeback we have to be careful so that we
2505 * do not miss some pages (e.g., because some other process has
2506 * cleared the TOWRITE tag we set). The rule we follow is that
2507 * TOWRITE tag can be cleared only by the process clearing the
2508 * DIRTY tag (and submitting the page for I/O).
2509 */
2510 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2511 tag_pages_for_writeback(mapping, wbc->index,
2512 wbc_end(wbc));
2513 } else {
807d1fe3 2514 wbc->nr_to_write -= folio_nr_pages(folio);
f946e0d2 2515
cdc150b5 2516 WARN_ON_ONCE(*error > 0);
00266770 2517
807d1fe3
MWO
2518 /*
2519 * For integrity writeback we have to keep going until we have
2520 * written all the folios we tagged for writeback above, even if
2521 * we run past wbc->nr_to_write or encounter errors.
2522 * We stash away the first error we encounter in wbc->saved_err
2523 * so that it can be retrieved when we're done. This is because
2524 * the file system may still have state to clear for each folio.
2525 *
2526 * For background writeback we exit as soon as we run past
2527 * wbc->nr_to_write or encounter the first error.
2528 */
2529 if (wbc->sync_mode == WB_SYNC_ALL) {
cdc150b5
CH
2530 if (*error && !wbc->saved_err)
2531 wbc->saved_err = *error;
807d1fe3 2532 } else {
cdc150b5 2533 if (*error || wbc->nr_to_write <= 0)
807d1fe3 2534 goto done;
811d736f 2535 }
811d736f 2536 }
64081362 2537
cdc150b5
CH
2538 folio = writeback_get_folio(mapping, wbc);
2539 if (!folio) {
2540 /*
2541 * To avoid deadlocks between range_cyclic writeback and callers
2542 * that hold pages in PageWriteback to aggregate I/O until
2543 * the writeback iteration finishes, we do not loop back to the
2544 * start of the file. Doing so causes a page lock/page
2545 * writeback access order inversion - we should only ever lock
2546 * multiple pages in ascending page->index order, and looping
2547 * back to the start of the file violates that rule and causes
2548 * deadlocks.
2549 */
2550 if (wbc->range_cyclic)
2551 mapping->writeback_index = 0;
2552
2553 /*
2554 * Return the first error we encountered (if there was any) to
2555 * the caller.
2556 */
2557 *error = wbc->saved_err;
2558 }
2559 return folio;
f946e0d2
CH
2560
2561done:
2562 if (wbc->range_cyclic)
2563 mapping->writeback_index = folio->index + folio_nr_pages(folio);
751e0d55 2564 folio_batch_release(&wbc->fbatch);
cdc150b5
CH
2565 return NULL;
2566}
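/*
 * Sketch of the calling convention described above (the myfs_* names are
 * hypothetical; the helper is expected to start I/O on, and unlock, the
 * locked folio it is given):
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		struct folio *folio = NULL;
 *		int error = 0;
 *
 *		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
 *			error = myfs_write_one_folio(folio, wbc);
 *		return error;
 *	}
 */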
2567
2568/**
2569 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
2570 * @mapping: address space structure to write
2571 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2572 * @writepage: function called for each page
2573 * @data: data passed to writepage function
2574 *
2575 * Return: %0 on success, negative error code otherwise
2576 *
2577 * Note: please use writeback_iter() instead.
2578 */
2579int write_cache_pages(struct address_space *mapping,
2580 struct writeback_control *wbc, writepage_t writepage,
2581 void *data)
2582{
2583 struct folio *folio = NULL;
2584 int error;
2585
2586 while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
2587 error = writepage(folio, wbc, data);
2588 if (error == AOP_WRITEPAGE_ACTIVATE) {
2589 folio_unlock(folio);
2590 error = 0;
2591 }
2592 }
2593
f946e0d2 2594 return error;
811d736f 2595}
0ea97180
MS
2596EXPORT_SYMBOL(write_cache_pages);
2597
c44ed5b7
MWO
2598static int writeback_use_writepage(struct address_space *mapping,
2599 struct writeback_control *wbc)
0ea97180 2600{
c44ed5b7
MWO
2601 struct folio *folio = NULL;
2602 struct blk_plug plug;
2603 int err;
6768907e 2604
c44ed5b7
MWO
2605 blk_start_plug(&plug);
2606 while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
2607 err = mapping->a_ops->writepage(&folio->page, wbc);
2608 if (err == AOP_WRITEPAGE_ACTIVATE) {
2609 folio_unlock(folio);
2610 err = 0;
2611 }
2612 mapping_set_error(mapping, err);
2613 }
2614 blk_finish_plug(&plug);
2615
2616 return err;
0ea97180
MS
2617}
2618
1da177e4
LT
2619int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2620{
22905f77 2621 int ret;
fee468fd 2622 struct bdi_writeback *wb;
22905f77 2623
1da177e4
LT
2624 if (wbc->nr_to_write <= 0)
2625 return 0;
fee468fd
JK
2626 wb = inode_to_wb_wbc(mapping->host, wbc);
2627 wb_bandwidth_estimate_start(wb);
80a2ea9f 2628 while (1) {
c2ca7a59 2629 if (mapping->a_ops->writepages) {
80a2ea9f 2630 ret = mapping->a_ops->writepages(mapping, wbc);
c2ca7a59 2631 } else if (mapping->a_ops->writepage) {
c44ed5b7 2632 ret = writeback_use_writepage(mapping, wbc);
c2ca7a59
CH
2633 } else {
2634 /* deal with chardevs and other special files */
2635 ret = 0;
2636 }
2637 if (ret != -ENOMEM || wbc->sync_mode != WB_SYNC_ALL)
80a2ea9f 2638 break;
8d58802f
MG
2639
2640 /*
2641 * Lacking an allocation context or the locality or writeback
2642 * state of any of the inode's pages, throttle based on
2643 * writeback activity on the local node. It's as good a
2644 * guess as any.
2645 */
2646 reclaim_throttle(NODE_DATA(numa_node_id()),
c3f4a9a2 2647 VMSCAN_THROTTLE_WRITEBACK);
80a2ea9f 2648 }
45a2966f
JK
2649 /*
2650 * Usually few pages are written by now from those we've just submitted
2651 * but if there's constant writeback being submitted, this makes sure
2652 * writeback bandwidth is updated once in a while.
2653 */
20792ebf
JK
2654 if (time_is_before_jiffies(READ_ONCE(wb->bw_time_stamp) +
2655 BANDWIDTH_INTERVAL))
45a2966f 2656 wb_update_bandwidth(wb);
22905f77 2657 return ret;
1da177e4
LT
2658}
2659
76719325
KC
2660/*
2661 * For address_spaces which do not use buffers nor write back.
2662 */
46de8b97 2663bool noop_dirty_folio(struct address_space *mapping, struct folio *folio)
76719325 2664{
46de8b97
MWO
2665 if (!folio_test_dirty(folio))
2666 return !folio_test_set_dirty(folio);
2667 return false;
76719325 2668}
46de8b97 2669EXPORT_SYMBOL(noop_dirty_folio);
76719325 2670
e3a7cca1
ES
2671/*
2672 * Helper function for set_page_dirty family.
c4843a75 2673 *
6c77b607 2674 * Caller must hold folio_memcg_lock().
c4843a75 2675 *
e3a7cca1
ES
2676 * NOTE: This relies on being atomic wrt interrupts.
2677 */
203a3151 2678static void folio_account_dirtied(struct folio *folio,
6e1cae88 2679 struct address_space *mapping)
e3a7cca1 2680{
52ebea74
TH
2681 struct inode *inode = mapping->host;
2682
b9b0ff61 2683 trace_writeback_dirty_folio(folio, mapping);
9fb0a7da 2684
f56753ac 2685 if (mapping_can_writeback(mapping)) {
52ebea74 2686 struct bdi_writeback *wb;
203a3151 2687 long nr = folio_nr_pages(folio);
de1414a6 2688
9cfb816b 2689 inode_attach_wb(inode, folio);
52ebea74 2690 wb = inode_to_wb(inode);
de1414a6 2691
203a3151
MWO
2692 __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
2693 __zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
2694 __node_stat_mod_folio(folio, NR_DIRTIED, nr);
2695 wb_stat_mod(wb, WB_RECLAIMABLE, nr);
2696 wb_stat_mod(wb, WB_DIRTIED, nr);
2697 task_io_account_write(nr * PAGE_SIZE);
2698 current->nr_dirtied += nr;
2699 __this_cpu_add(bdp_ratelimits, nr);
97b27821 2700
203a3151 2701 mem_cgroup_track_foreign_dirty(folio, wb);
e3a7cca1
ES
2702 }
2703}
2704
b9ea2515
KK
2705/*
2706 * Helper function for deaccounting dirty page without writeback.
2707 *
6c77b607 2708 * Caller must hold folio_memcg_lock().
b9ea2515 2709 */
566d3362 2710void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
b9ea2515 2711{
566d3362
HD
2712 long nr = folio_nr_pages(folio);
2713
2714 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2715 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2716 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2717 task_io_account_cancelled_write(nr * PAGE_SIZE);
b9ea2515 2718}
b9ea2515 2719
6e1cae88 2720/*
203a3151
MWO
2721 * Mark the folio dirty, and set it dirty in the page cache, and mark
2722 * the inode dirty.
6e1cae88 2723 *
203a3151 2724 * If warn is true, then emit a warning if the folio is not uptodate and has
6e1cae88
MWO
2725 * not been truncated.
2726 *
3d84d897
MWO
2727 * The caller must hold folio_memcg_lock(). It is the caller's
2728 * responsibility to prevent the folio from being truncated while
2729 * this function is in progress, although it may have been truncated
2730 * before this function is called. Most callers have the folio locked.
2731 * A few have the folio blocked from truncation through other means (e.g.
2732 * zap_vma_pages() has it mapped and is holding the page table lock).
2733 * When called from mark_buffer_dirty(), the filesystem should hold a
2734 * reference to the buffer_head that is being marked dirty, which causes
2735 * try_to_free_buffers() to fail.
6e1cae88 2736 */
203a3151 2737void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
6e1cae88
MWO
2738 int warn)
2739{
2740 unsigned long flags;
2741
2742 xa_lock_irqsave(&mapping->i_pages, flags);
203a3151
MWO
2743 if (folio->mapping) { /* Race with truncate? */
2744 WARN_ON_ONCE(warn && !folio_test_uptodate(folio));
2745 folio_account_dirtied(folio, mapping);
2746 __xa_set_mark(&mapping->i_pages, folio_index(folio),
6e1cae88
MWO
2747 PAGECACHE_TAG_DIRTY);
2748 }
2749 xa_unlock_irqrestore(&mapping->i_pages, flags);
2750}
2751
85d4d2eb
MWO
2752/**
2753 * filemap_dirty_folio - Mark a folio dirty for filesystems which do not use buffer_heads.
2754 * @mapping: Address space this folio belongs to.
2755 * @folio: Folio to be marked as dirty.
1da177e4 2756 *
85d4d2eb 2757 * Filesystems which do not use buffer heads should call this function
ab428b4c 2758 * from their dirty_folio address space operation. It ignores the
85d4d2eb
MWO
2759 * contents of folio_get_private(), so if the filesystem marks individual
2760 * blocks as dirty, the filesystem should handle that itself.
1da177e4 2761 *
85d4d2eb
MWO
2762 * This is also sometimes used by filesystems which use buffer_heads when
2763 * a single buffer is being dirtied: we want to set the folio dirty in
2764 * that case, but not all the buffers. This is a "bottom-up" dirtying,
e621900a 2765 * whereas block_dirty_folio() is a "top-down" dirtying.
85d4d2eb
MWO
2766 *
2767 * The caller must ensure this doesn't race with truncation. Most will
2768 * simply hold the folio lock, but e.g. zap_pte_range() calls with the
2769 * folio mapped and the pte lock held, which also locks out truncation.
1da177e4 2770 */
85d4d2eb 2771bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
1da177e4 2772{
85d4d2eb
MWO
2773 folio_memcg_lock(folio);
2774 if (folio_test_set_dirty(folio)) {
2775 folio_memcg_unlock(folio);
2776 return false;
2777 }
1da177e4 2778
85d4d2eb
MWO
2779 __folio_mark_dirty(folio, mapping, !folio_test_private(folio));
2780 folio_memcg_unlock(folio);
c4843a75 2781
85d4d2eb
MWO
2782 if (mapping->host) {
2783 /* !PageAnon && !swapper_space */
2784 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1da177e4 2785 }
85d4d2eb 2786 return true;
1da177e4 2787}
85d4d2eb 2788EXPORT_SYMBOL(filemap_dirty_folio);
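/*
 * Typical usage (a sketch): a filesystem that does not use buffer heads
 * simply points its address_space_operations at this helper, e.g.
 *
 *	.dirty_folio	= filemap_dirty_folio,
 */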
1da177e4 2789
cd78ab11
MWO
2790/**
2791 * folio_redirty_for_writepage - Decline to write a dirty folio.
2792 * @wbc: The writeback control.
2793 * @folio: The folio.
2794 *
2795 * When a writepage implementation decides that it doesn't want to write
2796 * @folio for some reason, it should call this function, unlock @folio and
2797 * return 0.
2798 *
2799 * Return: True if we redirtied the folio. False if someone else dirtied
2800 * it first.
1da177e4 2801 */
cd78ab11
MWO
2802bool folio_redirty_for_writepage(struct writeback_control *wbc,
2803 struct folio *folio)
1da177e4 2804{
ed2da924 2805 struct address_space *mapping = folio->mapping;
cd78ab11 2806 long nr = folio_nr_pages(folio);
ed2da924 2807 bool ret;
cd78ab11
MWO
2808
2809 wbc->pages_skipped += nr;
ed2da924
CH
2810 ret = filemap_dirty_folio(mapping, folio);
2811 if (mapping && mapping_can_writeback(mapping)) {
2812 struct inode *inode = mapping->host;
2813 struct bdi_writeback *wb;
2814 struct wb_lock_cookie cookie = {};
8d38633c 2815
ed2da924
CH
2816 wb = unlocked_inode_to_wb_begin(inode, &cookie);
2817 current->nr_dirtied -= nr;
2818 node_stat_mod_folio(folio, NR_DIRTIED, -nr);
2819 wb_stat_mod(wb, WB_DIRTIED, -nr);
2820 unlocked_inode_to_wb_end(inode, &cookie);
2821 }
8d38633c 2822 return ret;
1da177e4 2823}
cd78ab11 2824EXPORT_SYMBOL(folio_redirty_for_writepage);
1da177e4 2825
b5e84594
MWO
2826/**
2827 * folio_mark_dirty - Mark a folio as being modified.
2828 * @folio: The folio.
6746aff7 2829 *
2ca456c2
MWO
2830 * The folio may not be truncated while this function is running.
2831 * Holding the folio lock is sufficient to prevent truncation, but some
2832 * callers cannot acquire a sleeping lock. These callers instead hold
2833 * the page table lock for a page table which contains at least one page
2834 * in this folio. Truncation will block on the page table lock as it
2835 * unmaps pages before removing the folio from its mapping.
b5e84594
MWO
2836 *
2837 * Return: True if the folio was newly dirtied, false if it was already dirty.
1da177e4 2838 */
b5e84594 2839bool folio_mark_dirty(struct folio *folio)
1da177e4 2840{
b5e84594 2841 struct address_space *mapping = folio_mapping(folio);
1da177e4
LT
2842
2843 if (likely(mapping)) {
278df9f4 2844 /*
5a9e3474 2845 * readahead/folio_deactivate could remain
6f31a5a2
MWO
2846 * PG_readahead/PG_reclaim due to race with folio_end_writeback
2847 * About readahead, if the folio is written, the flags would be
278df9f4 2848 * reset. So no problem.
5a9e3474 2849 * About folio_deactivate, if the folio is redirtied,
6f31a5a2
MWO
2850 * the flag will be reset. So no problem. but if the
2851 * folio is used by readahead it will confuse readahead
2852 * and make it restart the size rampup process. But it's
2853 * a trivial problem.
278df9f4 2854 */
b5e84594
MWO
2855 if (folio_test_reclaim(folio))
2856 folio_clear_reclaim(folio);
3a3bae50 2857 return mapping->a_ops->dirty_folio(mapping, folio);
4741c9fd 2858 }
3a3bae50
MWO
2859
2860 return noop_dirty_folio(mapping, folio);
1da177e4 2861}
b5e84594 2862EXPORT_SYMBOL(folio_mark_dirty);
1da177e4
LT
2863
2864/*
2865 * set_page_dirty() is racy if the caller has no reference against
2866 * page->mapping->host, and if the page is unlocked. This is because another
2867 * CPU could truncate the page off the mapping and then free the mapping.
2868 *
2869 * Usually, the page _is_ locked, or the caller is a user-space process which
2870 * holds a reference on the inode by having an open file.
2871 *
2872 * In other cases, the page should be locked before running set_page_dirty().
2873 */
2874int set_page_dirty_lock(struct page *page)
2875{
2876 int ret;
2877
7eaceacc 2878 lock_page(page);
1da177e4
LT
2879 ret = set_page_dirty(page);
2880 unlock_page(page);
2881 return ret;
2882}
2883EXPORT_SYMBOL(set_page_dirty_lock);
2884
11f81bec
TH
2885/*
2886 * This cancels just the dirty bit on the kernel page itself, it does NOT
2887 * actually remove dirty bits on any mmap's that may be around. It also
2888 * leaves the page tagged dirty, so any sync activity will still find it on
2889 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2890 * look at the dirty bits in the VM.
2891 *
2892 * Doing this should *normally* only ever be done when a page is truncated,
2893 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2894 * this when it notices that somebody has cleaned out all the buffers on a
2895 * page without actually doing it through the VM. Can you say "ext3 is
2896 * horribly ugly"? Thought you could.
2897 */
fdaf532a 2898void __folio_cancel_dirty(struct folio *folio)
11f81bec 2899{
fdaf532a 2900 struct address_space *mapping = folio_mapping(folio);
c4843a75 2901
f56753ac 2902 if (mapping_can_writeback(mapping)) {
682aa8e1
TH
2903 struct inode *inode = mapping->host;
2904 struct bdi_writeback *wb;
2e898e4c 2905 struct wb_lock_cookie cookie = {};
c4843a75 2906
fdaf532a 2907 folio_memcg_lock(folio);
2e898e4c 2908 wb = unlocked_inode_to_wb_begin(inode, &cookie);
c4843a75 2909
fdaf532a 2910 if (folio_test_clear_dirty(folio))
566d3362 2911 folio_account_cleaned(folio, wb);
c4843a75 2912
2e898e4c 2913 unlocked_inode_to_wb_end(inode, &cookie);
fdaf532a 2914 folio_memcg_unlock(folio);
c4843a75 2915 } else {
fdaf532a 2916 folio_clear_dirty(folio);
c4843a75 2917 }
11f81bec 2918}
fdaf532a 2919EXPORT_SYMBOL(__folio_cancel_dirty);
11f81bec 2920
1da177e4 2921/*
9350f20a
MWO
2922 * Clear a folio's dirty flag, while caring for dirty memory accounting.
2923 * Returns true if the folio was previously dirty.
1da177e4 2924 *
9350f20a
MWO
2925 * This is for preparing to put the folio under writeout. We leave
2926 * the folio tagged as dirty in the xarray so that a concurrent
2927 * write-for-sync can discover it via a PAGECACHE_TAG_DIRTY walk.
2928 * The ->writepage implementation will run either folio_start_writeback()
2929 * or folio_mark_dirty(), at which stage we bring the folio's dirty flag
2930 * and xarray dirty tag back into sync.
1da177e4 2931 *
9350f20a
MWO
2932 * This incoherency between the folio's dirty flag and xarray tag is
2933 * unfortunate, but it only exists while the folio is locked.
1da177e4 2934 */
9350f20a 2935bool folio_clear_dirty_for_io(struct folio *folio)
1da177e4 2936{
9350f20a
MWO
2937 struct address_space *mapping = folio_mapping(folio);
2938 bool ret = false;
1da177e4 2939
9350f20a 2940 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
79352894 2941
f56753ac 2942 if (mapping && mapping_can_writeback(mapping)) {
682aa8e1
TH
2943 struct inode *inode = mapping->host;
2944 struct bdi_writeback *wb;
2e898e4c 2945 struct wb_lock_cookie cookie = {};
682aa8e1 2946
7658cc28
LT
2947 /*
2948 * Yes, Virginia, this is indeed insane.
2949 *
2950 * We use this sequence to make sure that
2951 * (a) we account for dirty stats properly
2952 * (b) we tell the low-level filesystem to
9350f20a 2953 * mark the whole folio dirty if it was
7658cc28 2954 * dirty in a pagetable. Only to then
9350f20a 2955 * (c) clean the folio again and return 1 to
7658cc28
LT
2956 * cause the writeback.
2957 *
2958 * This way we avoid all nasty races with the
2959 * dirty bit in multiple places and clearing
2960 * them concurrently from different threads.
2961 *
9350f20a 2962 * Note! Normally the "folio_mark_dirty(folio)"
7658cc28
LT
2963 * has no effect on the actual dirty bit - since
2964 * that will already usually be set. But we
2965 * need the side effects, and it can help us
2966 * avoid races.
2967 *
9350f20a 2968 * We basically use the folio "master dirty bit"
7658cc28
LT
2969 * as a serialization point for all the different
2970 * threads doing their things.
7658cc28 2971 */
9350f20a
MWO
2972 if (folio_mkclean(folio))
2973 folio_mark_dirty(folio);
79352894
NP
2974 /*
2975 * We carefully synchronise fault handlers against
9350f20a 2976 * installing a dirty pte and marking the folio dirty
2d6d7f98 2977 * at this point. We do this by having them hold the
9350f20a 2978 * page lock while dirtying the folio, and folios are
2d6d7f98
JW
2979 * always locked coming in here, so we get the desired
2980 * exclusion.
79352894 2981 */
2e898e4c 2982 wb = unlocked_inode_to_wb_begin(inode, &cookie);
9350f20a
MWO
2983 if (folio_test_clear_dirty(folio)) {
2984 long nr = folio_nr_pages(folio);
2985 lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
2986 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
2987 wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
2988 ret = true;
1da177e4 2989 }
2e898e4c 2990 unlocked_inode_to_wb_end(inode, &cookie);
c4843a75 2991 return ret;
1da177e4 2992 }
9350f20a 2993 return folio_test_clear_dirty(folio);
1da177e4 2994}
9350f20a 2995EXPORT_SYMBOL(folio_clear_dirty_for_io);
1da177e4 2996
633a2abb
JK
2997static void wb_inode_writeback_start(struct bdi_writeback *wb)
2998{
2999 atomic_inc(&wb->writeback_inodes);
3000}
3001
3002static void wb_inode_writeback_end(struct bdi_writeback *wb)
3003{
f87904c0 3004 unsigned long flags;
633a2abb 3005 atomic_dec(&wb->writeback_inodes);
45a2966f
JK
3006 /*
3007 * Make sure estimate of writeback throughput gets updated after
3008 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
3009 * (which is the interval other bandwidth updates use for batching) so
3010 * that if multiple inodes end writeback at a similar time, they get
3011 * batched into one bandwidth update.
3012 */
f87904c0
KK
3013 spin_lock_irqsave(&wb->work_lock, flags);
3014 if (test_bit(WB_registered, &wb->state))
3015 queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
3016 spin_unlock_irqrestore(&wb->work_lock, flags);
633a2abb
JK
3017}
3018
2580d554 3019bool __folio_end_writeback(struct folio *folio)
1da177e4 3020{
269ccca3
MWO
3021 long nr = folio_nr_pages(folio);
3022 struct address_space *mapping = folio_mapping(folio);
2580d554 3023 bool ret;
1da177e4 3024
269ccca3 3025 folio_memcg_lock(folio);
371a096e 3026 if (mapping && mapping_use_writeback_tags(mapping)) {
91018134
TH
3027 struct inode *inode = mapping->host;
3028 struct backing_dev_info *bdi = inode_to_bdi(inode);
1da177e4
LT
3029 unsigned long flags;
3030
b93b0163 3031 xa_lock_irqsave(&mapping->i_pages, flags);
2580d554 3032 ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
7d0795d0
MWO
3033 __xa_clear_mark(&mapping->i_pages, folio_index(folio),
3034 PAGECACHE_TAG_WRITEBACK);
3035 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3036 struct bdi_writeback *wb = inode_to_wb(inode);
3037
3038 wb_stat_mod(wb, WB_WRITEBACK, -nr);
3039 __wb_writeout_add(wb, nr);
3040 if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
3041 wb_inode_writeback_end(wb);
69cb51d1 3042 }
6c60d2b5
DC
3043
3044 if (mapping->host && !mapping_tagged(mapping,
3045 PAGECACHE_TAG_WRITEBACK))
3046 sb_clear_inode_writeback(mapping->host);
3047
b93b0163 3048 xa_unlock_irqrestore(&mapping->i_pages, flags);
1da177e4 3049 } else {
2580d554 3050 ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
99b12e3d 3051 }
7d0795d0
MWO
3052
3053 lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
3054 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
3055 node_stat_mod_folio(folio, NR_WRITTEN, nr);
269ccca3 3056 folio_memcg_unlock(folio);
2580d554
MWO
3057
3058 return ret;
1da177e4
LT
3059}
3060
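/*
 * Editor's note: illustrative sketch, not part of page-writeback.c.
 * Filesystems do not normally call __folio_end_writeback() directly; they
 * call folio_end_writeback() (mm/filemap.c), which clears PG_writeback via
 * this helper and wakes any waiters.  A write completion handler looks
 * roughly like the hypothetical my_fs_write_endio() below; the folio
 * iterator is assumed from <linux/bio.h>, and error handling is omitted.
 */
static void my_fs_write_endio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_writeback(fi.folio);	/* drops PG_writeback, wakes waiters */
	bio_put(bio);
}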
b5612c36 3061void __folio_start_writeback(struct folio *folio, bool keep_write)
1da177e4 3062{
f143f1ea
MWO
3063 long nr = folio_nr_pages(folio);
3064 struct address_space *mapping = folio_mapping(folio);
f143f1ea 3065 int access_ret;
1da177e4 3066
b5612c36
MWO
3067 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
3068
f143f1ea 3069 folio_memcg_lock(folio);
371a096e 3070 if (mapping && mapping_use_writeback_tags(mapping)) {
f143f1ea 3071 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
91018134
TH
3072 struct inode *inode = mapping->host;
3073 struct backing_dev_info *bdi = inode_to_bdi(inode);
1da177e4 3074 unsigned long flags;
b5612c36 3075 bool on_wblist;
1da177e4 3076
ff9c745b
MW
3077 xas_lock_irqsave(&xas, flags);
3078 xas_load(&xas);
b5612c36 3079 folio_test_set_writeback(folio);
6c60d2b5 3080
b5612c36 3081 on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
6c60d2b5 3082
b5612c36
MWO
3083 xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
3084 if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
3085 struct bdi_writeback *wb = inode_to_wb(inode);
6c60d2b5 3086
b5612c36
MWO
3087 wb_stat_mod(wb, WB_WRITEBACK, nr);
3088 if (!on_wblist)
3089 wb_inode_writeback_start(wb);
69cb51d1 3090 }
b5612c36
MWO
3091
3092 /*
3093 * We can come through here when swapping anonymous
3094 * folios, so we don't necessarily have an inode to
3095 * track for sync.
3096 */
3097 if (mapping->host && !on_wblist)
3098 sb_mark_inode_writeback(mapping->host);
f143f1ea 3099 if (!folio_test_dirty(folio))
ff9c745b 3100 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
1c8349a1 3101 if (!keep_write)
ff9c745b
MW
3102 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
3103 xas_unlock_irqrestore(&xas, flags);
1da177e4 3104 } else {
b5612c36 3105 folio_test_set_writeback(folio);
3a3c02ec 3106 }
b5612c36
MWO
3107
3108 lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
3109 zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
f143f1ea 3110 folio_memcg_unlock(folio);
b5612c36 3111
f143f1ea 3112 access_ret = arch_make_folio_accessible(folio);
f28d4363
CI
3113 /*
3114 * If writeback has been triggered on a folio that cannot be made
3115 * accessible, it is too late to recover here.
3116 */
f143f1ea 3117 VM_BUG_ON_FOLIO(access_ret != 0, folio);
1da177e4 3118}
f143f1ea 3119EXPORT_SYMBOL(__folio_start_writeback);
1da177e4 3120
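/*
 * Editor's note: illustrative sketch, not part of page-writeback.c.
 * Callers normally reach __folio_start_writeback() through thin wrappers in
 * the page-flag headers rather than calling it directly; the exact upstream
 * definitions are assumed here, so the sketches use a _sketch suffix.
 * @keep_write only matters for tagged writeback: passing true leaves
 * PAGECACHE_TAG_TOWRITE set so a later tagged pass still picks the folio up.
 */
static inline void folio_start_writeback_sketch(struct folio *folio)
{
	__folio_start_writeback(folio, false);	/* usual case: drop the TOWRITE tag */
}

static inline void folio_start_writeback_keepwrite_sketch(struct folio *folio)
{
	__folio_start_writeback(folio, true);	/* keep the TOWRITE tag */
}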
490e016f
MWO
3121/**
3122 * folio_wait_writeback - Wait for a folio to finish writeback.
3123 * @folio: The folio to wait for.
3124 *
3125 * If the folio is currently being written back to storage, wait for the
3126 * I/O to complete.
3127 *
3128 * Context: Sleeps. Must be called in process context and with
3129 * no spinlocks held. Caller should hold a reference on the folio.
3130 * If the folio is not locked, writeback may start again after writeback
3131 * has finished.
19343b5b 3132 */
490e016f 3133void folio_wait_writeback(struct folio *folio)
19343b5b 3134{
490e016f 3135 while (folio_test_writeback(folio)) {
b9b0ff61 3136 trace_folio_wait_writeback(folio, folio_mapping(folio));
101c0bf6 3137 folio_wait_bit(folio, PG_writeback);
19343b5b
YS
3138 }
3139}
490e016f 3140EXPORT_SYMBOL_GPL(folio_wait_writeback);
19343b5b 3141
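/*
 * Editor's note: illustrative sketch, not part of page-writeback.c.  As the
 * kerneldoc above warns, waiting on an unlocked folio is inherently racy
 * because new writeback can begin as soon as the old one finishes.  Code
 * that needs a stable "not under writeback" state therefore locks the folio
 * first (starting writeback requires the folio lock).  my_fs_wait_until_clean()
 * is a hypothetical helper showing that ordering.
 */
static void my_fs_wait_until_clean(struct folio *folio)
{
	folio_lock(folio);
	folio_wait_writeback(folio);	/* cannot restart: we hold the folio lock */
	/* ... truncate, invalidate or otherwise operate on the folio ... */
	folio_unlock(folio);
}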
490e016f
MWO
3142/**
3143 * folio_wait_writeback_killable - Wait for a folio to finish writeback.
3144 * @folio: The folio to wait for.
3145 *
3146 * If the folio is currently being written back to storage, wait for the
3147 * I/O to complete or a fatal signal to arrive.
3148 *
3149 * Context: Sleeps. Must be called in process context and with
3150 * no spinlocks held. Caller should hold a reference on the folio.
3151 * If the folio is not locked, writeback may start again after writeback
3152 * has finished.
3153 * Return: 0 on success, -EINTR if we get a fatal signal while waiting.
e5dbd332 3154 */
490e016f 3155int folio_wait_writeback_killable(struct folio *folio)
e5dbd332 3156{
490e016f 3157 while (folio_test_writeback(folio)) {
b9b0ff61 3158 trace_folio_wait_writeback(folio, folio_mapping(folio));
101c0bf6 3159 if (folio_wait_bit_killable(folio, PG_writeback))
e5dbd332
MWO
3160 return -EINTR;
3161 }
3162
3163 return 0;
3164}
490e016f 3165EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
e5dbd332 3166
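/*
 * Editor's note: illustrative sketch, not part of page-writeback.c.  The
 * killable variant lets a fatal signal abort the wait.  The caller pattern
 * below (backing out of a fault with VM_FAULT_RETRY) is an assumption about
 * typical usage, not code taken from a particular filesystem.
 */
static vm_fault_t my_fs_fault_wait(struct folio *folio)
{
	if (folio_wait_writeback_killable(folio)) {
		folio_unlock(folio);
		return VM_FAULT_RETRY;	/* fatal signal: back out of the fault */
	}
	return 0;
}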
1d1d1a76 3167/**
a49d0c50
MWO
3168 * folio_wait_stable() - wait for writeback to finish, if necessary.
3169 * @folio: The folio to wait on.
1d1d1a76 3170 *
a49d0c50
MWO
3171 * This function determines if the given folio is related to a backing
3172 * device that requires folio contents to be held stable during writeback.
3173 * If so, then it will wait for any pending writeback to complete.
3174 *
3175 * Context: Sleeps. Must be called in process context and with
3176 * no spinlocks held. Caller should hold a reference on the folio.
3177 * If the folio is not locked, writeback may start again after writeback
3178 * has finished.
1d1d1a76 3179 */
a49d0c50 3180void folio_wait_stable(struct folio *folio)
1d1d1a76 3181{
762321da 3182 if (mapping_stable_writes(folio_mapping(folio)))
a49d0c50 3183 folio_wait_writeback(folio);
1d1d1a76 3184}
a49d0c50 3185EXPORT_SYMBOL_GPL(folio_wait_stable);
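/*
 * Editor's note: illustrative sketch, not part of page-writeback.c.  The
 * classic caller of folio_wait_stable() is a ->page_mkwrite()-style path:
 * before letting userspace redirty a mapped folio, a backing device that
 * requires stable pages during writeback (mapping_stable_writes()) must see
 * any in-flight writeback finish first.  my_fs_page_mkwrite_tail() is a
 * hypothetical helper illustrating the ordering only.
 */
static vm_fault_t my_fs_page_mkwrite_tail(struct folio *folio)
{
	folio_lock(folio);
	folio_wait_stable(folio);	/* no-op unless stable writes are required */
	folio_mark_dirty(folio);
	return VM_FAULT_LOCKED;		/* ->page_mkwrite() returns with the folio locked */
}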