/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>	/* __set_page_dirty_buffers */
#include <linux/pagevec.h>
#include <linux/timer.h>
#include <linux/sched/rt.h>
#include <linux/mm_inline.h>
#include <trace/events/writeback.h>

#include "internal.h"

/*
 * Sleep at most 200ms at a time in balance_dirty_pages().
 */
#define MAX_PAUSE		max(HZ/5, 1)

/*
 * Try to keep balance_dirty_pages() call intervals higher than this many pages
 * by raising pause time to max_pause when the interval falls below it.
 */
#define DIRTY_POLL_THRESH	(128 >> (PAGE_SHIFT - 10))

/*
 * Estimate write bandwidth at 200ms intervals.
 */
#define BANDWIDTH_INTERVAL	max(HZ/5, 1)

#define RATELIMIT_CALC_SHIFT	10

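/*
 * Note (not part of the original source): pos_ratio and the rate limits
 * derived from it below are computed in RATELIMIT_CALC_SHIFT fixed point,
 * so a pos_ratio of 1 << RATELIMIT_CALC_SHIFT (1024) represents 1.0 and
 * 2 << RATELIMIT_CALC_SHIFT represents the 2.0 ceiling used further down.
 */
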
/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that the background
 * threshold is computed from dirty_background_ratio * the amount of
 * dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that the dirty threshold is
 * computed from vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

EXPORT_SYMBOL_GPL(dirty_writeback_interval);

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */

struct wb_domain global_wb_domain;

/* consolidated parameters for balance_dirty_pages() and its subroutines */
struct dirty_throttle_control {
#ifdef CONFIG_CGROUP_WRITEBACK
	struct wb_domain	*dom;
	struct dirty_throttle_control *gdtc;	/* only set in memcg dtc's */
#endif
	struct bdi_writeback	*wb;
	struct fprop_local_percpu *wb_completions;

	unsigned long		avail;		/* dirtyable */
	unsigned long		dirty;		/* file_dirty + write + nfs */
	unsigned long		thresh;		/* dirty threshold */
	unsigned long		bg_thresh;	/* dirty background threshold */

	unsigned long		wb_dirty;	/* per-wb counterparts */
	unsigned long		wb_thresh;
	unsigned long		wb_bg_thresh;

	unsigned long		pos_ratio;
};

#define DTC_INIT_COMMON(__wb)	.wb = (__wb),				\
				.wb_completions = &(__wb)->completions

/*
 * Length of period for aging writeout fractions of bdis. This is an
 * arbitrarily chosen number. The longer the period, the more slowly the
 * fractions will reflect changes in the current writeout rate.
 */
#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)

#ifdef CONFIG_CGROUP_WRITEBACK

#define GDTC_INIT(__wb)		.dom = &global_wb_domain,		\
				DTC_INIT_COMMON(__wb)
#define GDTC_INIT_NO_WB		.dom = &global_wb_domain
#define MDTC_INIT(__wb, __gdtc)	.dom = mem_cgroup_wb_domain(__wb),	\
				.gdtc = __gdtc,				\
				DTC_INIT_COMMON(__wb)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return dtc->dom;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return mdtc->gdtc;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return &wb->memcg_completions;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	unsigned long this_bw = wb->avg_write_bandwidth;
	unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
	unsigned long long min = wb->bdi->min_ratio;
	unsigned long long max = wb->bdi->max_ratio;

	/*
	 * @wb may already be clean by the time control reaches here and
	 * the total may not include its bw.
	 */
	if (this_bw < tot_bw) {
		if (min) {
			min *= this_bw;
			do_div(min, tot_bw);
		}
		if (max < 100) {
			max *= this_bw;
			do_div(max, tot_bw);
		}
	}

	*minp = min;
	*maxp = max;
}

#else	/* CONFIG_CGROUP_WRITEBACK */

#define GDTC_INIT(__wb)		DTC_INIT_COMMON(__wb)
#define GDTC_INIT_NO_WB
#define MDTC_INIT(__wb, __gdtc)

static bool mdtc_valid(struct dirty_throttle_control *dtc)
{
	return false;
}

static struct wb_domain *dtc_dom(struct dirty_throttle_control *dtc)
{
	return &global_wb_domain;
}

static struct dirty_throttle_control *mdtc_gdtc(struct dirty_throttle_control *mdtc)
{
	return NULL;
}

static struct fprop_local_percpu *wb_memcg_completions(struct bdi_writeback *wb)
{
	return NULL;
}

static void wb_min_max_ratio(struct bdi_writeback *wb,
			     unsigned long *minp, unsigned long *maxp)
{
	*minp = wb->bdi->min_ratio;
	*maxp = wb->bdi->max_ratio;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

/*
 * In a memory zone, there is a certain number of pages we consider
 * available for the page cache, which is essentially the number of
 * free and reclaimable pages, minus some zone reserves to protect
 * lowmem and the ability to uphold the zone's watermarks without
 * requiring writeback.
 *
 * This number of dirtyable pages is the base value on which the
 * user-configurable dirty ratio operates: the effective number of pages
 * that are allowed to be actually dirtied, either per individual zone
 * or globally by using the sum of dirtyable pages over all zones.
 *
 * Because the user is allowed to specify the dirty limit globally as
 * an absolute number of bytes, calculating the per-zone dirty limit can
 * require translating the configured limit into a percentage of
 * global dirtyable memory first.
 */

/**
 * zone_dirtyable_memory - number of dirtyable pages in a zone
 * @zone: the zone
 *
 * Returns the zone's number of pages potentially available for dirty
 * page cache. This is the base value for the per-zone dirty limits.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);

	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

	return nr_pages;
}

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_dirtyable_memory(z);
	}
	/*
	 * Unreclaimable memory (kernel memory or anonymous memory
	 * without swap) can bring down the dirtyable pages below
	 * the zone's dirty balance reserve and the above calculation
	 * will underflow. However we still want to add in nodes
	 * which are below threshold (negative values) to get a more
	 * accurate calculation, while making sure that the total
	 * never underflows.
	 */
	if ((long)x < 0)
		x = 0;

	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the total amount of dirtyable memory. This can only
	 * occur in very strange VM situations but we want to guard
	 * against it.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * global_dirtyable_memory - number of globally dirtyable pages
 *
 * Returns the global number of pages potentially available for dirty
 * page cache. This is the base value for the global dirty limits.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	x -= min(x, dirty_balance_reserve);

	x += global_page_state(NR_INACTIVE_FILE);
	x += global_page_state(NR_ACTIVE_FILE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/**
 * domain_dirty_limits - calculate thresh and bg_thresh for a wb_domain
 * @dtc: dirty_throttle_control of interest
 *
 * Calculate @dtc->thresh and ->bg_thresh considering
 * vm_dirty_{bytes|ratio} and dirty_background_{bytes|ratio}. The caller
 * must ensure that @dtc->avail is set before calling this function. The
 * dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
 * real-time tasks.
 */
static void domain_dirty_limits(struct dirty_throttle_control *dtc)
{
	const unsigned long available_memory = dtc->avail;
	struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
	unsigned long bytes = vm_dirty_bytes;
	unsigned long bg_bytes = dirty_background_bytes;
	unsigned long ratio = vm_dirty_ratio;
	unsigned long bg_ratio = dirty_background_ratio;
	unsigned long thresh;
	unsigned long bg_thresh;
	struct task_struct *tsk;

	/* gdtc is !NULL iff @dtc is for memcg domain */
	if (gdtc) {
		unsigned long global_avail = gdtc->avail;

		/*
		 * The byte settings can't be applied directly to memcg
		 * domains. Convert them to ratios by scaling against
		 * globally available memory.
		 */
		if (bytes)
			ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
				    global_avail, 100UL);
		if (bg_bytes)
			bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
				       global_avail, 100UL);
		bytes = bg_bytes = 0;
	}

	if (bytes)
		thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
	else
		thresh = (ratio * available_memory) / 100;

	if (bg_bytes)
		bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
	else
		bg_thresh = (bg_ratio * available_memory) / 100;

	if (bg_thresh >= thresh)
		bg_thresh = thresh / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		bg_thresh += bg_thresh / 4;
		thresh += thresh / 4;
	}
	dtc->thresh = thresh;
	dtc->bg_thresh = bg_thresh;

	/* we should eventually report the domain in the TP */
	if (!gdtc)
		trace_global_dirty_state(bg_thresh, thresh);
}

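/*
 * Hypothetical worked example (not part of the original source): with
 * dtc->avail = 1,000,000 dirtyable pages, vm_dirty_ratio = 20 and
 * dirty_background_ratio = 10 (both byte interfaces unset),
 * domain_dirty_limits() yields thresh = 200,000 and bg_thresh = 100,000
 * pages. A PF_LESS_THROTTLE or real-time task sees both lifted by 1/4
 * to 250,000 and 125,000 pages respectively.
 */
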
/**
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 * @pbackground: out parameter for bg_thresh
 * @pdirty: out parameter for thresh
 *
 * Calculate bg_thresh and thresh for global_wb_domain. See
 * domain_dirty_limits() for details.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };

	gdtc.avail = global_dirtyable_memory();
	domain_dirty_limits(&gdtc);

	*pbackground = gdtc.bg_thresh;
	*pdirty = gdtc.thresh;
}

/**
 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
 * @zone: the zone
 *
 * Returns the maximum number of dirty pages allowed in a zone, based
 * on the zone's dirtyable memory.
 */
static unsigned long zone_dirty_limit(struct zone *zone)
{
	unsigned long zone_memory = zone_dirtyable_memory(zone);
	struct task_struct *tsk = current;
	unsigned long dirty;

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
			zone_memory / global_dirtyable_memory();
	else
		dirty = vm_dirty_ratio * zone_memory / 100;

	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
		dirty += dirty / 4;

	return dirty;
}

/**
 * zone_dirty_ok - tells whether a zone is within its dirty limits
 * @zone: the zone to check
 *
 * Returns %true when the dirty pages in @zone are within the zone's
 * dirty limit, %false if the limit is exceeded.
 */
bool zone_dirty_ok(struct zone *zone)
{
	unsigned long limit = zone_dirty_limit(zone);

	return zone_page_state(zone, NR_FILE_DIRTY) +
	       zone_page_state(zone, NR_UNSTABLE_NFS) +
	       zone_page_state(zone, NR_WRITEBACK) <= limit;
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp,
			loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		writeback_set_ratelimit();
		vm_dirty_bytes = 0;
	}
	return ret;
}

int dirty_bytes_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp,
			loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		writeback_set_ratelimit();
		vm_dirty_ratio = 0;
	}
	return ret;
}

static unsigned long wp_next_time(unsigned long cur_time)
{
	cur_time += VM_COMPLETIONS_PERIOD_LEN;
	/* 0 has a special meaning... */
	if (!cur_time)
		return 1;
	return cur_time;
}

static void wb_domain_writeout_inc(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac)
{
	__fprop_inc_percpu_max(&dom->completions, completions,
			       max_prop_frac);
	/* First event after period switching was turned off? */
	if (unlikely(!dom->period_time)) {
		/*
		 * We can race with other __bdi_writeout_inc calls here but
		 * it does not cause any harm since the resulting time when
		 * the timer will fire and what is in writeout_period_time
		 * will be roughly the same.
		 */
		dom->period_time = wp_next_time(jiffies);
		mod_timer(&dom->period_timer, dom->period_time);
	}
}

/*
 * Increment @wb's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __wb_writeout_inc(struct bdi_writeback *wb)
{
	struct wb_domain *cgdom;

	__inc_wb_stat(wb, WB_WRITTEN);
	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
			       wb->bdi->max_prop_frac);

	cgdom = mem_cgroup_wb_domain(wb);
	if (cgdom)
		wb_domain_writeout_inc(cgdom, wb_memcg_completions(wb),
				       wb->bdi->max_prop_frac);
}

void wb_writeout_inc(struct bdi_writeback *wb)
{
	unsigned long flags;

	local_irq_save(flags);
	__wb_writeout_inc(wb);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(wb_writeout_inc);

/*
 * On an idle system we can be called long after we scheduled because we use
 * deferred timers, so count in the missed periods.
 */
static void writeout_period(unsigned long t)
{
	struct wb_domain *dom = (void *)t;
	int miss_periods = (jiffies - dom->period_time) /
				 VM_COMPLETIONS_PERIOD_LEN;

	if (fprop_new_period(&dom->completions, miss_periods + 1)) {
		dom->period_time = wp_next_time(dom->period_time +
				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
		mod_timer(&dom->period_timer, dom->period_time);
	} else {
		/*
		 * Aging has zeroed all fractions. Stop wasting CPU on period
		 * updates.
		 */
		dom->period_time = 0;
	}
}

int wb_domain_init(struct wb_domain *dom, gfp_t gfp)
{
	memset(dom, 0, sizeof(*dom));

	spin_lock_init(&dom->lock);

	init_timer_deferrable(&dom->period_timer);
	dom->period_timer.function = writeout_period;
	dom->period_timer.data = (unsigned long)dom;

	dom->dirty_limit_tstamp = jiffies;

	return fprop_global_init(&dom->completions, gfp);
}

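/*
 * Hypothetical usage sketch (not part of the original source): the global
 * domain is set up once at boot, roughly as
 *
 *	BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
 *
 * while per-memcg domains call wb_domain_init() on cgroup creation and
 * wb_domain_exit() on destruction.
 */
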
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom)
{
	del_timer_sync(&dom->period_timer);
	fprop_global_destroy(&dom->completions);
}
#endif

/*
 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
 * registered backing devices, which, for obvious reasons, cannot
 * exceed 100%.
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

static unsigned long dirty_freerun_ceiling(unsigned long thresh,
					   unsigned long bg_thresh)
{
	return (thresh + bg_thresh) / 2;
}

static unsigned long hard_dirty_limit(struct wb_domain *dom,
				      unsigned long thresh)
{
	return max(thresh, dom->dirty_limit);
}

/* memory available to a memcg domain is capped by system-wide clean memory */
static void mdtc_cap_avail(struct dirty_throttle_control *mdtc)
{
	struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc);
	unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty);

	mdtc->avail = min(mdtc->avail, clean);
}

/**
 * __wb_calc_thresh - @wb's share of dirty throttling threshold
 * @dtc: dirty_throttle_control of interest
 *
 * Returns @wb's dirty limit in pages. The term "dirty" in the context of
 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
 *
 * Note that balance_dirty_pages() will only seriously take it as a hard limit
 * when sleeping max_pause per page is not enough to keep the dirty pages under
 * control. For example, when the device is completely stalled due to some error
 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
 * In other normal situations, it acts more gently by throttling the tasks more
 * (rather than completely blocking them) when the wb dirty pages go high.
 *
 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The wb's share of the dirty limit adapts to its throughput and is
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	u64 wb_thresh;
	long numerator, denominator;
	unsigned long wb_min_ratio, wb_max_ratio;

	/*
	 * Calculate this BDI's share of the thresh ratio.
	 */
	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
			      &numerator, &denominator);

	wb_thresh = (thresh * (100 - bdi_min_ratio)) / 100;
	wb_thresh *= numerator;
	do_div(wb_thresh, denominator);

	wb_min_max_ratio(dtc->wb, &wb_min_ratio, &wb_max_ratio);

	wb_thresh += (thresh * wb_min_ratio) / 100;
	if (wb_thresh > (thresh * wb_max_ratio) / 100)
		wb_thresh = thresh * wb_max_ratio / 100;

	return wb_thresh;
}

unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
					       .thresh = thresh };
	return __wb_calc_thresh(&gdtc);
}

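/*
 * Hypothetical worked example (not part of the original source): with a
 * global thresh of 200,000 pages, bdi_min_ratio = 0 and a wb whose recent
 * writeout completion fraction is 30% (numerator/denominator = 3/10),
 * __wb_calc_thresh() yields wb_thresh = 200,000 * 3/10 = 60,000 pages,
 * then raises it by this bdi's min_ratio share and caps it at max_ratio.
 */
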
/*
 *                           setpoint - dirty 3
 *        f(dirty) := 1.0 + (----------------)
 *                             limit - setpoint
 *
 * it's a 3rd order polynomial that is subject to
 *
 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
 * (2) f(setpoint) = 1.0 => the balance point
 * (3) f(limit)    = 0   => the hard limit
 * (4) df/dx      <= 0	 => negative feedback control
 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
 *     => fast response on large errors; small oscillation near setpoint
 */
static long long pos_ratio_polynom(unsigned long setpoint,
				   unsigned long dirty,
				   unsigned long limit)
{
	long long pos_ratio;
	long x;

	x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
		      (limit - setpoint) | 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT);
}

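/*
 * Hypothetical worked example (not part of the original source): with
 * setpoint = 8,000 pages and limit = 16,000 pages, a task observing
 * dirty = 12,000 gets x = (8000 - 12000) / (16000 - 8000) = -0.5 and
 * pos_ratio = 1.0 + (-0.5)^3 = 0.875, i.e. it is throttled to 87.5% of
 * the base ratelimit (896 in RATELIMIT_CALC_SHIFT fixed point). At
 * dirty == limit, x = -1 and pos_ratio reaches the hard-limit value 0.
 */
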
/*
 * Dirty position control.
 *
 * (o) global/bdi setpoints
 *
 * We want the dirty pages to be balanced around the global/wb setpoints.
 * When the number of dirty pages is higher/lower than the setpoint, the
 * dirty position control ratio (and hence task dirty ratelimit) will be
 * decreased/increased to bring the dirty pages back to the setpoint.
 *
 *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
 *
 *     if (dirty < setpoint) scale up   pos_ratio
 *     if (dirty > setpoint) scale down pos_ratio
 *
 *     if (wb_dirty < wb_setpoint) scale up   pos_ratio
 *     if (wb_dirty > wb_setpoint) scale down pos_ratio
 *
 *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
 *
 * (o) global control line
 *
 *     ^ pos_ratio
 *     |
 *     |            |<===== global dirty control scope ======>|
 * 2.0 .............*
 *     |            .*
 *     |            . *
 *     |            .   *
 *     |            .     *
 *     |            .        *
 *     |            .            *
 * 1.0 ................................*
 *     |            .                  .     *
 *     |            .                  .          *
 *     |            .                  .              *
 *     |            .                  .                 *
 *     |            .                  .                    *
 *   0 +------------.------------------.----------------------*------------->
 *           freerun^          setpoint^                 limit^   dirty pages
 *
 * (o) wb control line
 *
 *     ^ pos_ratio
 *     |
 *     |            *
 *     |              *
 *     |                *
 *     |                  *
 *     |                    * |<=========== span ============>|
 * 1.0 .......................*
 *     |                      . *
 *     |                      .   *
 *     |                      .     *
 *     |                      .       *
 *     |                      .         *
 *     |                      .           *
 *     |                      .             *
 *     |                      .               *
 *     |                      .                 *
 *     |                      .                   *
 *     |                      .                     *
 * 1/4 ...............................................* * * * * * * * * * * *
 *     |                      .                         .
 *     |                      .                           .
 *     |                      .                             .
 *   0 +----------------------.-------------------------------.------------->
 *                wb_setpoint^                    x_intercept^
 *
 * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can
 * be smoothly throttled down to normal if it starts high in situations like
 * - start writing to a slow SD card and a fast disk at the same time. The SD
 *   card's wb_dirty may rush to many times higher than wb_setpoint.
 * - the wb dirty thresh drops quickly due to change of JBOD workload
 */
static void wb_position_ratio(struct dirty_throttle_control *dtc)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long wb_thresh = dtc->wb_thresh;
	unsigned long x_intercept;
	unsigned long setpoint;		/* dirty pages' target balance point */
	unsigned long wb_setpoint;
	unsigned long span;
	long long pos_ratio;		/* for scaling up/down the rate limit */
	long x;

	dtc->pos_ratio = 0;

	if (unlikely(dtc->dirty >= limit))
		return;

	/*
	 * global setpoint
	 *
	 * See comment for pos_ratio_polynom().
	 */
	setpoint = (freerun + limit) / 2;
	pos_ratio = pos_ratio_polynom(setpoint, dtc->dirty, limit);

	/*
	 * The strictlimit feature is a tool preventing mistrusted filesystems
	 * from growing a large number of dirty pages before throttling. For
	 * such filesystems balance_dirty_pages always checks wb counters
	 * against wb limits. Even if global "nr_dirty" is under "freerun".
	 * This is especially important for fuse which sets bdi->max_ratio to
	 * 1% by default. Without strictlimit feature, fuse writeback may
	 * consume an arbitrary amount of RAM because it is accounted in
	 * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty".
	 *
	 * Here, in wb_position_ratio(), we calculate pos_ratio based on
	 * two values: wb_dirty and wb_thresh. Let's consider an example:
	 * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global
	 * limits are set by default to 10% and 20% (background and throttle).
	 * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages.
	 * wb_calc_thresh(wb, bg_thresh) is about ~4K pages. wb_setpoint is
	 * about ~6K pages (as the average of background and throttle wb
	 * limits). The 3rd order polynomial will provide positive feedback if
	 * wb_dirty is under wb_setpoint and vice versa.
	 *
	 * Note, that we cannot use global counters in these calculations
	 * because we want to throttle a process writing to a strictlimit wb
	 * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB
	 * in the example above).
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		long long wb_pos_ratio;

		if (dtc->wb_dirty < 8) {
			dtc->pos_ratio = min_t(long long, pos_ratio * 2,
					       2 << RATELIMIT_CALC_SHIFT);
			return;
		}

		if (dtc->wb_dirty >= wb_thresh)
			return;

		wb_setpoint = dirty_freerun_ceiling(wb_thresh,
						    dtc->wb_bg_thresh);

		if (wb_setpoint == 0 || wb_setpoint == wb_thresh)
			return;

		wb_pos_ratio = pos_ratio_polynom(wb_setpoint, dtc->wb_dirty,
						 wb_thresh);

		/*
		 * Typically, for strictlimit case, wb_setpoint << setpoint
		 * and pos_ratio >> wb_pos_ratio. In other words, the global
		 * state ("dirty") is not the limiting factor and we have to
		 * make the decision based on wb counters. But there is an
		 * important case when global pos_ratio should get precedence:
		 * global limits are exceeded (e.g. due to activities on other
		 * wb's) while given strictlimit wb is below limit.
		 *
		 * "pos_ratio * wb_pos_ratio" would work for the case above,
		 * but it would look too non-natural for the case of all
		 * activity in the system coming from a single strictlimit wb
		 * with bdi->max_ratio == 100%.
		 *
		 * Note that min() below somewhat changes the dynamics of the
		 * control system. Normally, pos_ratio value can be well over 3
		 * (when globally we are at freerun and wb is well below wb
		 * setpoint). Now the maximum pos_ratio in the same situation
		 * is 2. We might want to tweak this if we observe the control
		 * system is too slow to adapt.
		 */
		dtc->pos_ratio = min(pos_ratio, wb_pos_ratio);
		return;
	}

	/*
	 * We have computed basic pos_ratio above based on global situation. If
	 * the wb is over/under its share of dirty pages, we want to scale
	 * pos_ratio further down/up. That is done by the following mechanism.
	 */

	/*
	 * wb setpoint
	 *
	 *        f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint)
	 *
	 *                        x_intercept - wb_dirty
	 *                     := --------------------------
	 *                        x_intercept - wb_setpoint
	 *
	 * The main wb control line is a linear function that is subject to
	 *
	 * (1) f(wb_setpoint) = 1.0
	 * (2) k = - 1 / (8 * write_bw)  (in single wb case)
	 *     or equally: x_intercept = wb_setpoint + 8 * write_bw
	 *
	 * For single wb case, the dirty pages are observed to fluctuate
	 * regularly within range
	 *        [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2]
	 * for various filesystems, where (2) can yield in a reasonable 12.5%
	 * fluctuation range for pos_ratio.
	 *
	 * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its
	 * own size, so move the slope over accordingly and choose a slope that
	 * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh.
	 */
	if (unlikely(wb_thresh > dtc->thresh))
		wb_thresh = dtc->thresh;
	/*
	 * It's very possible that wb_thresh is close to 0 not because the
	 * device is slow, but because it has remained inactive for a long
	 * time. Honour such devices a reasonably good (hopefully IO
	 * efficient) threshold, so that the occasional writes won't be
	 * blocked and active writes can rampup the threshold quickly.
	 */
	wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8);
	/*
	 * scale global setpoint to wb's:
	 *	wb_setpoint = setpoint * wb_thresh / thresh
	 */
	x = div_u64((u64)wb_thresh << 16, dtc->thresh | 1);
	wb_setpoint = setpoint * (u64)x >> 16;
	/*
	 * Use span=(8*write_bw) in single wb case as indicated by
	 * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case.
	 *
	 *        wb_thresh                    thresh - wb_thresh
	 * span = --------- * (8 * write_bw) + ------------------ * wb_thresh
	 *          thresh                           thresh
	 */
	span = (dtc->thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16;
	x_intercept = wb_setpoint + span;

	if (dtc->wb_dirty < x_intercept - span / 4) {
		pos_ratio = div64_u64(pos_ratio * (x_intercept - dtc->wb_dirty),
				      (x_intercept - wb_setpoint) | 1);
	} else
		pos_ratio /= 4;

	/*
	 * wb reserve area, safeguard against dirty pool underrun and disk idle
	 * It may push the desired control point of global dirty pages higher
	 * than setpoint.
	 */
	x_intercept = wb_thresh / 2;
	if (dtc->wb_dirty < x_intercept) {
		if (dtc->wb_dirty > x_intercept / 8)
			pos_ratio = div_u64(pos_ratio * x_intercept,
					    dtc->wb_dirty);
		else
			pos_ratio *= 8;
	}

	dtc->pos_ratio = pos_ratio;
}

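/*
 * Hypothetical worked example (not part of the original source): for a
 * single wb writing at write_bw = 25,600 pages/s (~100MB/s with 4k pages),
 * span = 8 * write_bw = 204,800 pages, so the wb control line falls from
 * 1.0 at wb_setpoint to 1/4 at wb_setpoint + 3/4 * span and stays at 1/4
 * beyond that, per the graph above.
 */
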
static void wb_update_write_bandwidth(struct bdi_writeback *wb,
				      unsigned long elapsed,
				      unsigned long written)
{
	const unsigned long period = roundup_pow_of_two(3 * HZ);
	unsigned long avg = wb->avg_write_bandwidth;
	unsigned long old = wb->write_bandwidth;
	u64 bw;

	/*
	 * bw = written * HZ / elapsed
	 *
	 *                   bw * elapsed + write_bandwidth * (period - elapsed)
	 * write_bandwidth = ---------------------------------------------------
	 *                                          period
	 *
	 * @written may have decreased due to account_page_redirty().
	 * Avoid underflowing @bw calculation.
	 */
	bw = written - min(written, wb->written_stamp);
	bw *= HZ;
	if (unlikely(elapsed > period)) {
		do_div(bw, elapsed);
		avg = bw;
		goto out;
	}
	bw += (u64)wb->write_bandwidth * (period - elapsed);
	bw >>= ilog2(period);

	/*
	 * one more level of smoothing, for filtering out sudden spikes
	 */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;

	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

out:
	/* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */
	avg = max(avg, 1LU);
	if (wb_has_dirty_io(wb)) {
		long delta = avg - wb->avg_write_bandwidth;
		WARN_ON_ONCE(atomic_long_add_return(delta,
					&wb->bdi->tot_write_bandwidth) <= 0);
	}
	wb->write_bandwidth = bw;
	wb->avg_write_bandwidth = avg;
}

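/*
 * Hypothetical worked example (not part of the original source), assuming
 * HZ = 1000: period = roundup_pow_of_two(3 * HZ) = 4096 ticks. If 2,560
 * pages were written during an elapsed = 200-tick interval (an
 * instantaneous rate of 12,800 pages/s) and the previous write_bandwidth
 * was 10,000 pages/s, the blended estimate becomes
 * (2560 * 1000 + 10000 * (4096 - 200)) >> 12 ~= 10,136 pages/s, i.e. the
 * estimate moves only a fraction of the way toward the new sample.
 */
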
static void update_dirty_limit(struct dirty_throttle_control *dtc)
{
	struct wb_domain *dom = dtc_dom(dtc);
	unsigned long thresh = dtc->thresh;
	unsigned long limit = dom->dirty_limit;

	/*
	 * Follow up in one step.
	 */
	if (limit < thresh) {
		limit = thresh;
		goto update;
	}

	/*
	 * Follow down slowly. Use the higher one as the target, because thresh
	 * may drop below dirty. This is exactly the reason to introduce
	 * dom->dirty_limit which is guaranteed to lie above the dirty pages.
	 */
	thresh = max(thresh, dtc->dirty);
	if (limit > thresh) {
		limit -= (limit - thresh) >> 5;
		goto update;
	}
	return;
update:
	dom->dirty_limit = limit;
}

static void domain_update_bandwidth(struct dirty_throttle_control *dtc,
				    unsigned long now)
{
	struct wb_domain *dom = dtc_dom(dtc);

	/*
	 * check locklessly first to optimize away locking for most of the time
	 */
	if (time_before(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL))
		return;

	spin_lock(&dom->lock);
	if (time_after_eq(now, dom->dirty_limit_tstamp + BANDWIDTH_INTERVAL)) {
		update_dirty_limit(dtc);
		dom->dirty_limit_tstamp = now;
	}
	spin_unlock(&dom->lock);
}

/*
 * Maintain wb->dirty_ratelimit, the base dirty throttle rate.
 *
 * Normal wb tasks will be curbed at or below it in long term.
 * Obviously it should be around (write_bw / N) when there are N dd tasks.
 */
static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc,
				      unsigned long dirtied,
				      unsigned long elapsed)
{
	struct bdi_writeback *wb = dtc->wb;
	unsigned long dirty = dtc->dirty;
	unsigned long freerun = dirty_freerun_ceiling(dtc->thresh, dtc->bg_thresh);
	unsigned long limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);
	unsigned long setpoint = (freerun + limit) / 2;
	unsigned long write_bw = wb->avg_write_bandwidth;
	unsigned long dirty_ratelimit = wb->dirty_ratelimit;
	unsigned long dirty_rate;
	unsigned long task_ratelimit;
	unsigned long balanced_dirty_ratelimit;
	unsigned long step;
	unsigned long x;

	/*
	 * The dirty rate will match the writeout rate in long term, except
	 * when dirty pages are truncated by userspace or re-dirtied by FS.
	 */
	dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed;

	/*
	 * task_ratelimit reflects each dd's dirty rate for the past 200ms.
	 */
	task_ratelimit = (u64)dirty_ratelimit *
					dtc->pos_ratio >> RATELIMIT_CALC_SHIFT;
	task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */

	/*
	 * A linear estimation of the "balanced" throttle rate. The theory is,
	 * if there are N dd tasks, each throttled at task_ratelimit, the wb's
	 * dirty_rate will be measured to be (N * task_ratelimit). So the below
	 * formula will yield the balanced rate limit (write_bw / N).
	 *
	 * Note that the expanded form is not a pure rate feedback:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate)		     (1)
	 * but also takes pos_ratio into account:
	 *	rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio  (2)
	 *
	 * (1) is not realistic because pos_ratio also takes part in balancing
	 * the dirty rate. Consider the state
	 *	pos_ratio = 0.5						     (3)
	 *	rate = 2 * (write_bw / N)				     (4)
	 * If (1) is used, it will get stuck in that state! Because each dd will
	 * be throttled at
	 *	task_ratelimit = pos_ratio * rate = (write_bw / N)	     (5)
	 * yielding
	 *	dirty_rate = N * task_ratelimit = write_bw		     (6)
	 * put (6) into (1) we get
	 *	rate_(i+1) = rate_(i)					     (7)
	 *
	 * So we end up using (2) to always keep
	 *	rate_(i+1) ~= (write_bw / N)				     (8)
	 * regardless of the value of pos_ratio. As long as (8) is satisfied,
	 * pos_ratio is able to drive itself to 1.0, which is not only where
	 * the dirty count meets the setpoint, but also where the slope of
	 * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
	 */
	balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
					   dirty_rate | 1);
	/*
	 * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
	 */
	if (unlikely(balanced_dirty_ratelimit > write_bw))
		balanced_dirty_ratelimit = write_bw;

	/*
	 * We could safely do this and return immediately:
	 *
	 *	wb->dirty_ratelimit = balanced_dirty_ratelimit;
	 *
	 * However to get a more stable dirty_ratelimit, the below elaborated
	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
	 *
	 * The below code essentially only uses the relative value of
	 *
	 *	task_ratelimit - dirty_ratelimit
	 *	= (pos_ratio - 1) * dirty_ratelimit
	 *
	 * which reflects the direction and size of dirty position error.
	 */

	/*
	 * dirty_ratelimit will follow balanced_dirty_ratelimit iff
	 * task_ratelimit is on the same side of dirty_ratelimit, too.
	 * For example, when
	 * - dirty_ratelimit > balanced_dirty_ratelimit
	 * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
	 * lowering dirty_ratelimit will help meet both the position and rate
	 * control targets. Otherwise, don't update dirty_ratelimit if it will
	 * only help meet the rate target. After all, what users ultimately
	 * feel and care about are a stable dirty rate and a small position
	 * error.
	 *
	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
	 * and filter out the singular points of balanced_dirty_ratelimit,
	 * which keeps jumping around randomly and can even leap far away at
	 * times due to the small 200ms estimation period of dirty_rate (we
	 * want to keep that period small to reduce time lags).
	 */
	step = 0;

	/*
	 * For strictlimit case, calculations above were based on wb counters
	 * and limits (starting from pos_ratio = wb_position_ratio() and up to
	 * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate).
	 * Hence, to calculate "step" properly, we have to use wb_dirty as
	 * "dirty" and wb_setpoint as "setpoint".
	 *
	 * We rampup dirty_ratelimit forcibly if wb_dirty is low because
	 * it's possible that wb_thresh is close to zero due to inactivity
	 * of backing device.
	 */
	if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) {
		dirty = dtc->wb_dirty;
		if (dtc->wb_dirty < 8)
			setpoint = dtc->wb_dirty + 1;
		else
			setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2;
	}

	if (dirty < setpoint) {
		x = min3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit < x)
			step = x - dirty_ratelimit;
	} else {
		x = max3(wb->balanced_dirty_ratelimit,
			 balanced_dirty_ratelimit, task_ratelimit);
		if (dirty_ratelimit > x)
			step = dirty_ratelimit - x;
	}

	/*
	 * Don't pursue 100% rate matching. It's impossible since the balanced
	 * rate itself is constantly fluctuating. So decrease the track speed
	 * when it gets close to the target. Helps eliminate pointless tremors.
	 */
	step >>= dirty_ratelimit / (2 * step + 1);
	/*
	 * Limit the tracking speed to avoid overshooting.
	 */
	step = (step + 7) / 8;

	if (dirty_ratelimit < balanced_dirty_ratelimit)
		dirty_ratelimit += step;
	else
		dirty_ratelimit -= step;

	wb->dirty_ratelimit = max(dirty_ratelimit, 1UL);
	wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit;

	trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit);
}

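/*
 * Hypothetical worked example (not part of the original source): suppose
 * two dd tasks share a wb with write_bw = 8,000 pages/s, the current
 * dirty_ratelimit is 8,000 pages/s and pos_ratio ~= 1.0, so
 * task_ratelimit ~= 8,000. The measured dirty_rate is then ~16,000
 * pages/s, and balanced_dirty_ratelimit = 8,000 * 8,000 / 16,000 = 4,000
 * pages/s = write_bw / N, exactly the per-task share derived above.
 */
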
static void __wb_update_bandwidth(struct dirty_throttle_control *gdtc,
				  struct dirty_throttle_control *mdtc,
				  unsigned long start_time,
				  bool update_ratelimit)
{
	struct bdi_writeback *wb = gdtc->wb;
	unsigned long now = jiffies;
	unsigned long elapsed = now - wb->bw_time_stamp;
	unsigned long dirtied;
	unsigned long written;

	lockdep_assert_held(&wb->list_lock);

	/*
	 * rate-limit, only update once every 200ms.
	 */
	if (elapsed < BANDWIDTH_INTERVAL)
		return;

	dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]);
	written = percpu_counter_read(&wb->stat[WB_WRITTEN]);

	/*
	 * Skip quiet periods when disk bandwidth is under-utilized.
	 * (at least 1s idle time between two flusher runs)
	 */
	if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time))
		goto snapshot;

	if (update_ratelimit) {
		domain_update_bandwidth(gdtc, now);
		wb_update_dirty_ratelimit(gdtc, dirtied, elapsed);

		/*
		 * @mdtc is always NULL if !CGROUP_WRITEBACK but the
		 * compiler has no way to figure that out. Help it.
		 */
		if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
			domain_update_bandwidth(mdtc, now);
			wb_update_dirty_ratelimit(mdtc, dirtied, elapsed);
		}
	}
	wb_update_write_bandwidth(wb, elapsed, written);

snapshot:
	wb->dirtied_stamp = dirtied;
	wb->written_stamp = written;
	wb->bw_time_stamp = now;
}

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
{
	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };

	__wb_update_bandwidth(&gdtc, NULL, start_time, false);
}

/*
 * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
 * will look to see if it needs to start dirty throttling.
 *
 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
 * global_page_state() too often. So scale it near-sqrt to the safety margin
 * (the number of pages we may dirty without exceeding the dirty limits).
 */
static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);

	return 1;
}

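/*
 * Hypothetical worked example (not part of the original source): with a
 * safety margin of thresh - dirty = 16,384 pages, ilog2(16384) = 14 and
 * the task may dirty 1 << (14 / 2) = 128 more pages before the next
 * (relatively expensive) global recheck.
 */
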
static unsigned long wb_max_pause(struct bdi_writeback *wb,
				  unsigned long wb_dirty)
{
	unsigned long bw = wb->avg_write_bandwidth;
	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too
	 * long, a small pool of dirty/writeback pages may go empty and the
	 * disk go idle.
	 *
	 * 8 serves as the safety ratio.
	 */
	t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

	return min_t(unsigned long, t, MAX_PAUSE);
}

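/*
 * Hypothetical worked example (not part of the original source), assuming
 * HZ = 1000: with wb_dirty = 1,200 pages and bw = 10,000 pages/s,
 * t = 1200 / (1 + 10000 / roundup_pow_of_two(1 + 125)) + 1 = 16 ticks,
 * i.e. a 16ms cap, well under MAX_PAUSE = 200ms; the small dirty pool
 * would drain in ~120ms, so longer sleeps would risk an idle disk.
 */
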
static long wb_min_pause(struct bdi_writeback *wb,
			 long max_pause,
			 unsigned long task_ratelimit,
			 unsigned long dirty_ratelimit,
			 int *nr_dirtied_pause)
{
	long hi = ilog2(wb->avg_write_bandwidth);
	long lo = ilog2(wb->dirty_ratelimit);
	long t;		/* target pause */
	long pause;	/* estimated next pause */
	int pages;	/* target nr_dirtied_pause */

	/* target for 10ms pause on 1-dd case */
	t = max(1, HZ / 100);

	/*
	 * Scale up pause time for concurrent dirtiers in order to reduce CPU
	 * overheads.
	 *
	 * (N * 10ms) on 2^N concurrent tasks.
	 */
	if (hi > lo)
		t += (hi - lo) * (10 * HZ) / 1024;

	/*
	 * This is a bit convoluted. We try to base the next nr_dirtied_pause
	 * on the much more stable dirty_ratelimit. However the next pause time
	 * will be computed based on task_ratelimit and the two rate limits may
	 * depart considerably at some time. Especially if task_ratelimit goes
	 * below dirty_ratelimit/2 and the target pause is max_pause, the next
	 * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
	 * result task_ratelimit won't be executed faithfully, which could
	 * eventually bring down dirty_ratelimit.
	 *
	 * We apply two rules to fix it up:
	 * 1) try to estimate the next pause time and if necessary, use a lower
	 *    nr_dirtied_pause so as not to exceed max_pause. When this happens,
	 *    nr_dirtied_pause will be "dancing" with task_ratelimit.
	 * 2) limit the target pause time to max_pause/2, so that the normal
	 *    small fluctuations of task_ratelimit won't trigger rule (1) and
	 *    nr_dirtied_pause will remain as stable as dirty_ratelimit.
	 */
	t = min(t, 1 + max_pause / 2);
	pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);

	/*
	 * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
	 * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
	 * When the 16 consecutive reads are often interrupted by some dirty
	 * throttling pause during the async writes, cfq will go idle
	 * (deadline is fine). So push nr_dirtied_pause as high as possible
	 * until it reaches DIRTY_POLL_THRESH=32 pages.
	 */
	if (pages < DIRTY_POLL_THRESH) {
		t = max_pause;
		pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
		if (pages > DIRTY_POLL_THRESH) {
			pages = DIRTY_POLL_THRESH;
			t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
		}
	}

	pause = HZ * pages / (task_ratelimit + 1);
	if (pause > max_pause) {
		t = max_pause;
		pages = task_ratelimit * t / roundup_pow_of_two(HZ);
	}

	*nr_dirtied_pause = pages;
	/*
	 * The minimal pause time will normally be half the target pause time.
	 */
	return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
}

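/*
 * Hypothetical worked example (not part of the original source), assuming
 * HZ = 1000: a lone dirtier targets t = HZ / 100 = 10ms. If
 * ilog2(avg_write_bandwidth) - ilog2(dirty_ratelimit) = 4 (roughly 16
 * concurrent dirtiers), the target grows by 4 * (10 * HZ) / 1024 ~= 39
 * ticks to ~49ms, trading pause granularity for lower CPU overhead.
 */
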
970fb01a 1463static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
5a537485 1464{
2bc00aef 1465 struct bdi_writeback *wb = dtc->wb;
93f78d88 1466 unsigned long wb_reclaimable;
5a537485
MP
1467
1468 /*
de1fff37 1469 * wb_thresh is not treated as some limiting factor as
5a537485 1470 * dirty_thresh, due to reasons
de1fff37 1471 * - in JBOD setup, wb_thresh can fluctuate a lot
5a537485 1472 * - in a system with HDD and USB key, the USB key may somehow
de1fff37
TH
1473 * go into state (wb_dirty >> wb_thresh) either because
1474 * wb_dirty starts high, or because wb_thresh drops low.
5a537485 1475 * In this case we don't want to hard throttle the USB key
de1fff37
TH
1476 * dirtiers for 100 seconds until wb_dirty drops under
1477 * wb_thresh. Instead the auxiliary wb control line in
a88a341a 1478 * wb_position_ratio() will let the dirtier task progress
de1fff37 1479 * at some rate <= (write_bw / 2) for bringing down wb_dirty.
5a537485 1480 */
b1cbc6d4 1481 dtc->wb_thresh = __wb_calc_thresh(dtc);
970fb01a
TH
1482 dtc->wb_bg_thresh = dtc->thresh ?
1483 div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
5a537485
MP
1484
1485 /*
1486 * In order to avoid the stacked BDI deadlock we need
1487 * to ensure we accurately count the 'dirty' pages when
1488 * the threshold is low.
1489 *
1490 * Otherwise it would be possible to get thresh+n pages
1491 * reported dirty, even though there are thresh-m pages
1492 * actually dirty; with m+n sitting in the percpu
1493 * deltas.
1494 */
2bc00aef 1495 if (dtc->wb_thresh < 2 * wb_stat_error(wb)) {
93f78d88 1496 wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
2bc00aef 1497 dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
5a537485 1498 } else {
93f78d88 1499 wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
2bc00aef 1500 dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
5a537485
MP
1501 }
1502}
1503
1da177e4
LT
1504/*
1505 * balance_dirty_pages() must be called by processes which are generating dirty
1506 * data. It looks at the number of dirty pages in the machine and will force
143dfe86 1507 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
5b0830cb
JA
1508 * If we're over `background_thresh' then the writeback threads are woken to
1509 * perform some writeout.
1da177e4 1510 */
3a2e9a5a 1511static void balance_dirty_pages(struct address_space *mapping,
dfb8ae56 1512 struct bdi_writeback *wb,
143dfe86 1513 unsigned long pages_dirtied)
1da177e4 1514{
2bc00aef 1515 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
c2aa723a 1516 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
2bc00aef 1517 struct dirty_throttle_control * const gdtc = &gdtc_stor;
c2aa723a
TH
1518 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1519 &mdtc_stor : NULL;
1520 struct dirty_throttle_control *sdtc;
143dfe86 1521 unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */
83712358 1522 long period;
7ccb9ad5
WF
1523 long pause;
1524 long max_pause;
1525 long min_pause;
1526 int nr_dirtied_pause;
e50e3720 1527 bool dirty_exceeded = false;
143dfe86 1528 unsigned long task_ratelimit;
7ccb9ad5 1529 unsigned long dirty_ratelimit;
dfb8ae56 1530 struct backing_dev_info *bdi = wb->bdi;
5a537485 1531 bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
e98be2d5 1532 unsigned long start_time = jiffies;
1da177e4
LT
1533
1534 for (;;) {
83712358 1535 unsigned long now = jiffies;
2bc00aef 1536 unsigned long dirty, thresh, bg_thresh;
c2aa723a 1537 unsigned long m_dirty, m_thresh, m_bg_thresh;
83712358 1538
143dfe86
WF
1539 /*
1540 * Unstable writes are a feature of certain networked
1541 * filesystems (i.e. NFS) in which data may have been
1542 * written to the server's write cache, but has not yet
1543 * been flushed to permanent storage.
1544 */
5fce25a9
PZ
1545 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1546 global_page_state(NR_UNSTABLE_NFS);
9fc3a43e 1547 gdtc->avail = global_dirtyable_memory();
2bc00aef 1548 gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
5fce25a9 1549
9fc3a43e 1550 domain_dirty_limits(gdtc);
16c4042f 1551
5a537485 1552 if (unlikely(strictlimit)) {
970fb01a 1553 wb_dirty_limits(gdtc);
5a537485 1554
2bc00aef
TH
1555 dirty = gdtc->wb_dirty;
1556 thresh = gdtc->wb_thresh;
970fb01a 1557 bg_thresh = gdtc->wb_bg_thresh;
5a537485 1558 } else {
2bc00aef
TH
1559 dirty = gdtc->dirty;
1560 thresh = gdtc->thresh;
1561 bg_thresh = gdtc->bg_thresh;
5a537485
MP
1562 }
1563
c2aa723a
TH
1564 if (mdtc) {
1565 unsigned long writeback;
1566
1567 /*
1568 * If @wb belongs to !root memcg, repeat the same
1569 * basic calculations for the memcg domain.
1570 */
1571 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty,
1572 &writeback);
1573 mdtc_cap_avail(mdtc);
1574 mdtc->dirty += writeback;
1575
1576 domain_dirty_limits(mdtc);
1577
1578 if (unlikely(strictlimit)) {
1579 wb_dirty_limits(mdtc);
1580 m_dirty = mdtc->wb_dirty;
1581 m_thresh = mdtc->wb_thresh;
1582 m_bg_thresh = mdtc->wb_bg_thresh;
1583 } else {
1584 m_dirty = mdtc->dirty;
1585 m_thresh = mdtc->thresh;
1586 m_bg_thresh = mdtc->bg_thresh;
1587 }
5a537485
MP
1588 }
1589
16c4042f
WF
1590 /*
1591 * Throttle it only when the background writeback cannot
1592 * catch up. This avoids (excessively) small writeouts
de1fff37 1593 * when the wb limits are ramping up in case of !strictlimit.
5a537485 1594 *
de1fff37
TH
1595 * In strictlimit case make decision based on the wb counters
1596 * and limits. Small writeouts when the wb limits are ramping
5a537485 1597 * up are the price we consciously pay for strictlimit-ing.
c2aa723a
TH
1598 *
1599 * If memcg domain is in effect, @dirty should be under
1600 * both global and memcg freerun ceilings.
16c4042f 1601 */
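		/*
		 * Worked example (illustrative numbers only, not from
		 * this file): with thresh == 51200 pages and bg_thresh
		 * == 25600 pages (200MB and 100MB with 4K pages), the
		 * freerun region ends at dirty_freerun_ceiling() ==
		 * (51200 + 25600) / 2 == 38400 pages (150MB); below
		 * that the dirtier breaks out of this loop without
		 * being throttled.
		 */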
c2aa723a
TH
1602 if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) &&
1603 (!mdtc ||
1604 m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) {
1605 unsigned long intv = dirty_poll_interval(dirty, thresh);
1606 unsigned long m_intv = ULONG_MAX;
1607
83712358
WF
1608 current->dirty_paused_when = now;
1609 current->nr_dirtied = 0;
c2aa723a
TH
1610 if (mdtc)
1611 m_intv = dirty_poll_interval(m_dirty, m_thresh);
1612 current->nr_dirtied_pause = min(intv, m_intv);
16c4042f 1613 break;
83712358 1614 }
16c4042f 1615
bc05873d 1616 if (unlikely(!writeback_in_progress(wb)))
9ecf4866 1617 wb_start_background_writeback(wb);
143dfe86 1618
c2aa723a
TH
1619 /*
1620 * Calculate global domain's pos_ratio and select the
1621 * global dtc by default.
1622 */
5a537485 1623 if (!strictlimit)
970fb01a 1624 wb_dirty_limits(gdtc);
5fce25a9 1625
2bc00aef
TH
1626 dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) &&
1627 ((gdtc->dirty > gdtc->thresh) || strictlimit);
daddfa3c
TH
1628
1629 wb_position_ratio(gdtc);
c2aa723a
TH
1630 sdtc = gdtc;
1631
1632 if (mdtc) {
1633 /*
1634 * If memcg domain is in effect, calculate its
1635 * pos_ratio. @wb should satisfy constraints from
1636 * both global and memcg domains. Choose the one
1637 * w/ lower pos_ratio.
1638 */
1639 if (!strictlimit)
1640 wb_dirty_limits(mdtc);
1641
1642 dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) &&
1643 ((mdtc->dirty > mdtc->thresh) || strictlimit);
1644
1645 wb_position_ratio(mdtc);
1646 if (mdtc->pos_ratio < gdtc->pos_ratio)
1647 sdtc = mdtc;
1648 }
daddfa3c 1649
a88a341a
TH
1650 if (dirty_exceeded && !wb->dirty_exceeded)
1651 wb->dirty_exceeded = 1;
1da177e4 1652
8a731799
TH
1653 if (time_is_before_jiffies(wb->bw_time_stamp +
1654 BANDWIDTH_INTERVAL)) {
1655 spin_lock(&wb->list_lock);
c2aa723a 1656 __wb_update_bandwidth(gdtc, mdtc, start_time, true);
8a731799
TH
1657 spin_unlock(&wb->list_lock);
1658 }
e98be2d5 1659
c2aa723a 1660 /* throttle according to the chosen dtc */
a88a341a 1661 dirty_ratelimit = wb->dirty_ratelimit;
c2aa723a 1662 task_ratelimit = ((u64)dirty_ratelimit * sdtc->pos_ratio) >>
3a73dbbc 1663 RATELIMIT_CALC_SHIFT;
c2aa723a 1664 max_pause = wb_max_pause(wb, sdtc->wb_dirty);
a88a341a
TH
1665 min_pause = wb_min_pause(wb, max_pause,
1666 task_ratelimit, dirty_ratelimit,
1667 &nr_dirtied_pause);
7ccb9ad5 1668
3a73dbbc 1669 if (unlikely(task_ratelimit == 0)) {
83712358 1670 period = max_pause;
c8462cc9 1671 pause = max_pause;
143dfe86 1672 goto pause;
04fbfdc1 1673 }
83712358
WF
1674 period = HZ * pages_dirtied / task_ratelimit;
1675 pause = period;
1676 if (current->dirty_paused_when)
1677 pause -= now - current->dirty_paused_when;
1678 /*
1679 * For less than 1s think time (ext3/4 may block the dirtier
1680 * for up to 800ms from time to time on 1-HDD; so does xfs,
1681 * though at much lower frequency), try to compensate for it in
1682 * future periods by updating the virtual time; otherwise just
1683 * do a reset, as it may be a light dirtier.
1684 */
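		/*
		 * Worked example (illustrative numbers only): with
		 * HZ == 1000, pages_dirtied == 32 and task_ratelimit
		 * == 256 pages/s, period == 1000 * 32 / 256 == 125
		 * jiffies (125ms); a task that last paused 50ms ago
		 * has 50 jiffies of think time deducted and will
		 * sleep for roughly 75ms.
		 */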
7ccb9ad5 1685 if (pause < min_pause) {
ece13ac3 1686 trace_balance_dirty_pages(bdi,
c2aa723a
TH
1687 sdtc->thresh,
1688 sdtc->bg_thresh,
1689 sdtc->dirty,
1690 sdtc->wb_thresh,
1691 sdtc->wb_dirty,
ece13ac3
WF
1692 dirty_ratelimit,
1693 task_ratelimit,
1694 pages_dirtied,
83712358 1695 period,
7ccb9ad5 1696 min(pause, 0L),
ece13ac3 1697 start_time);
83712358
WF
1698 if (pause < -HZ) {
1699 current->dirty_paused_when = now;
1700 current->nr_dirtied = 0;
1701 } else if (period) {
1702 current->dirty_paused_when += period;
1703 current->nr_dirtied = 0;
7ccb9ad5
WF
1704 } else if (current->nr_dirtied_pause <= pages_dirtied)
1705 current->nr_dirtied_pause += pages_dirtied;
57fc978c 1706 break;
04fbfdc1 1707 }
7ccb9ad5
WF
1708 if (unlikely(pause > max_pause)) {
1709 /* for occasional dropped task_ratelimit */
1710 now += min(pause - max_pause, max_pause);
1711 pause = max_pause;
1712 }
143dfe86
WF
1713
1714pause:
ece13ac3 1715 trace_balance_dirty_pages(bdi,
c2aa723a
TH
1716 sdtc->thresh,
1717 sdtc->bg_thresh,
1718 sdtc->dirty,
1719 sdtc->wb_thresh,
1720 sdtc->wb_dirty,
ece13ac3
WF
1721 dirty_ratelimit,
1722 task_ratelimit,
1723 pages_dirtied,
83712358 1724 period,
ece13ac3
WF
1725 pause,
1726 start_time);
499d05ec 1727 __set_current_state(TASK_KILLABLE);
d25105e8 1728 io_schedule_timeout(pause);
87c6a9b2 1729
83712358
WF
1730 current->dirty_paused_when = now + pause;
1731 current->nr_dirtied = 0;
7ccb9ad5 1732 current->nr_dirtied_pause = nr_dirtied_pause;
83712358 1733
ffd1f609 1734 /*
2bc00aef
TH
1735 * This is typically equal to (dirty < thresh) and can also
1736 * keep "1000+ dd on a slow USB stick" under control.
ffd1f609 1737 */
1df64719 1738 if (task_ratelimit)
ffd1f609 1739 break;
499d05ec 1740
c5c6343c
WF
1741 /*
1742 * In the case of an unresponsive NFS server whose dirty
de1fff37 1743 * pages exceed dirty_thresh, give the other good wb's a pipe
c5c6343c
WF
1744 * to go through, so that tasks on them still remain responsive.
1745 *
1746 * In theory 1 page is enough to keep the consumer-producer
1747 * pipe going: the flusher cleans 1 page => the task dirties 1
de1fff37 1748 * more page. However wb_dirty has accounting errors. So use
93f78d88 1749 * the larger and more IO friendly wb_stat_error.
c5c6343c 1750 */
c2aa723a 1751 if (sdtc->wb_dirty <= wb_stat_error(wb))
c5c6343c
WF
1752 break;
1753
499d05ec
JK
1754 if (fatal_signal_pending(current))
1755 break;
1da177e4
LT
1756 }
1757
a88a341a
TH
1758 if (!dirty_exceeded && wb->dirty_exceeded)
1759 wb->dirty_exceeded = 0;
1da177e4 1760
bc05873d 1761 if (writeback_in_progress(wb))
5b0830cb 1762 return;
1da177e4
LT
1763
1764 /*
1765 * In laptop mode, we wait until hitting the higher threshold before
1766 * starting background writeout, and then write out all the way down
1767 * to the lower threshold. So slow writers cause minimal disk activity.
1768 *
1769 * In normal mode, we start background writeout at the lower
1770 * background_thresh, to keep the amount of dirty memory low.
1771 */
143dfe86
WF
1772 if (laptop_mode)
1773 return;
1774
2bc00aef 1775 if (nr_reclaimable > gdtc->bg_thresh)
9ecf4866 1776 wb_start_background_writeback(wb);
1da177e4
LT
1777}
1778
9d823e8f 1779static DEFINE_PER_CPU(int, bdp_ratelimits);
245b2e70 1780
54848d73
WF
1781/*
1782 * Normal tasks are throttled by
1783 * loop {
1784 * dirty tsk->nr_dirtied_pause pages;
1785 * take a nap in balance_dirty_pages();
1786 * }
1787 * However there is a worst case: if every task exits immediately after dirtying
1788 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1789 * called to throttle the page dirties. The solution is to save the not yet
1790 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1791 * randomly into the running tasks. This works well for the above worst case,
1792 * as the new task will pick up and accumulate the old task's leaked dirty
1793 * count and eventually get throttled.
1794 */
1795DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1796
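/*
 * Worked example (illustrative): a short-lived task with
 * nr_dirtied_pause == 16 that dirties 15 pages and exits is never
 * throttled; those 15 page dirties are parked in dirty_throttle_leaks
 * at task exit time (outside this file) and are charged below to the
 * next dirtier that runs on this CPU.
 */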
1da177e4 1797/**
d0e1d66b 1798 * balance_dirty_pages_ratelimited - balance dirty memory state
67be2dd1 1799 * @mapping: address_space which was dirtied
1da177e4
LT
1800 *
1801 * Processes which are dirtying memory should call in here once for each page
1802 * which was newly dirtied. The function will periodically check the system's
1803 * dirty state and will initiate writeback if needed.
1804 *
1805 * On really big machines, get_writeback_state is expensive, so try to avoid
1806 * calling it too often (ratelimiting). But once we're over the dirty memory
1807 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1808 * from overshooting the limit by (ratelimit_pages) each.
1809 */
d0e1d66b 1810void balance_dirty_pages_ratelimited(struct address_space *mapping)
1da177e4 1811{
dfb8ae56
TH
1812 struct inode *inode = mapping->host;
1813 struct backing_dev_info *bdi = inode_to_bdi(inode);
1814 struct bdi_writeback *wb = NULL;
9d823e8f
WF
1815 int ratelimit;
1816 int *p;
1da177e4 1817
36715cef
WF
1818 if (!bdi_cap_account_dirty(bdi))
1819 return;
1820
dfb8ae56
TH
1821 if (inode_cgwb_enabled(inode))
1822 wb = wb_get_create_current(bdi, GFP_KERNEL);
1823 if (!wb)
1824 wb = &bdi->wb;
1825
9d823e8f 1826 ratelimit = current->nr_dirtied_pause;
a88a341a 1827 if (wb->dirty_exceeded)
9d823e8f
WF
1828 ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1829
9d823e8f 1830 preempt_disable();
1da177e4 1831 /*
9d823e8f
WF
1832 * This prevents one CPU from accumulating too many dirtied pages without
1833 * calling into balance_dirty_pages(), which can happen when there are
1834 * 1000+ tasks that all start dirtying pages at exactly the same
1835 * time and hence all honour a too-large initial task->nr_dirtied_pause.
1da177e4 1836 */
7c8e0181 1837 p = this_cpu_ptr(&bdp_ratelimits);
9d823e8f 1838 if (unlikely(current->nr_dirtied >= ratelimit))
fa5a734e 1839 *p = 0;
d3bc1fef
WF
1840 else if (unlikely(*p >= ratelimit_pages)) {
1841 *p = 0;
1842 ratelimit = 0;
1da177e4 1843 }
54848d73
WF
1844 /*
1845 * Pick up the pages dirtied by exited tasks. This avoids lots of
1846 * short-lived tasks (e.g. gcc invocations in a kernel build) escaping
1847 * the dirty throttling and livelocking other long-run dirtiers.
1848 */
7c8e0181 1849 p = this_cpu_ptr(&dirty_throttle_leaks);
54848d73 1850 if (*p > 0 && current->nr_dirtied < ratelimit) {
d0e1d66b 1851 unsigned long nr_pages_dirtied;
54848d73
WF
1852 nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1853 *p -= nr_pages_dirtied;
1854 current->nr_dirtied += nr_pages_dirtied;
1da177e4 1855 }
fa5a734e 1856 preempt_enable();
9d823e8f
WF
1857
1858 if (unlikely(current->nr_dirtied >= ratelimit))
dfb8ae56
TH
1859 balance_dirty_pages(mapping, wb, current->nr_dirtied);
1860
1861 wb_put(wb);
1da177e4 1862}
d0e1d66b 1863EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
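/*
 * Illustrative call site (a sketch, not code from this file): the
 * generic buffered write path calls this once per newly dirtied page,
 * roughly:
 *
 *	status = a_ops->write_end(file, mapping, pos, bytes, copied,
 *				  page, fsdata);
 *	...
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * (cf. generic_perform_write() in mm/filemap.c)
 */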
1da177e4 1864
aa661bbe
TH
1865/**
1866 * wb_over_bg_thresh - does @wb need to be written back?
1867 * @wb: bdi_writeback of interest
1868 *
1869 * Determines whether background writeback should keep writing @wb or it's
1870 * clean enough. Returns %true if writeback should continue.
1871 */
1872bool wb_over_bg_thresh(struct bdi_writeback *wb)
1873{
947e9762 1874 struct dirty_throttle_control gdtc_stor = { GDTC_INIT(wb) };
c2aa723a 1875 struct dirty_throttle_control mdtc_stor = { MDTC_INIT(wb, &gdtc_stor) };
947e9762 1876 struct dirty_throttle_control * const gdtc = &gdtc_stor;
c2aa723a
TH
1877 struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
1878 &mdtc_stor : NULL;
aa661bbe 1879
947e9762
TH
1880 /*
1881 * Similar to balance_dirty_pages() but ignores pages being written
1882 * as we're trying to decide whether to put more under writeback.
1883 */
1884 gdtc->avail = global_dirtyable_memory();
1885 gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
1886 global_page_state(NR_UNSTABLE_NFS);
1887 domain_dirty_limits(gdtc);
aa661bbe 1888
947e9762 1889 if (gdtc->dirty > gdtc->bg_thresh)
aa661bbe
TH
1890 return true;
1891
947e9762 1892 if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
aa661bbe
TH
1893 return true;
1894
c2aa723a
TH
1895 if (mdtc) {
1896 unsigned long writeback;
1897
1898 mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback);
1899 mdtc_cap_avail(mdtc);
1900 domain_dirty_limits(mdtc); /* ditto, ignore writeback */
1901
1902 if (mdtc->dirty > mdtc->bg_thresh)
1903 return true;
1904
1905 if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
1906 return true;
1907 }
1908
aa661bbe
TH
1909 return false;
1910}
1911
232ea4d6 1912void throttle_vm_writeout(gfp_t gfp_mask)
1da177e4 1913{
364aeb28
DR
1914 unsigned long background_thresh;
1915 unsigned long dirty_thresh;
1da177e4
LT
1916
1917 for ( ; ; ) {
16c4042f 1918 global_dirty_limits(&background_thresh, &dirty_thresh);
c7981433 1919 dirty_thresh = hard_dirty_limit(&global_wb_domain, dirty_thresh);
1da177e4
LT
1920
1921 /*
1922 * Boost the allowable dirty threshold a bit for page
1923 * allocators so they don't get DoS'ed by heavy writers
1924 */
1925 dirty_thresh += dirty_thresh / 10; /* wheeee... */
1926
c24f21bd
CL
1927 if (global_page_state(NR_UNSTABLE_NFS) +
1928 global_page_state(NR_WRITEBACK) <= dirty_thresh)
1929 break;
8aa7e847 1930 congestion_wait(BLK_RW_ASYNC, HZ/10);
369f2389
FW
1931
1932 /*
1933 * The caller might hold locks which can prevent IO completion
1934 * or progress in the filesystem. So we cannot just sit here
1935 * waiting for IO to complete.
1936 */
1937 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1938 break;
1da177e4
LT
1939 }
1940}
1941
1da177e4
LT
1942/*
1943 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1944 */
cccad5b9 1945int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
8d65af78 1946 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 1947{
8d65af78 1948 proc_dointvec(table, write, buffer, length, ppos);
1da177e4
LT
1949 return 0;
1950}
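/*
 * For example, "echo 1500 > /proc/sys/vm/dirty_writeback_centisecs"
 * sets the kupdate-style writeback interval to 15 seconds (the unit
 * is centiseconds).
 */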
1951
c2c4986e 1952#ifdef CONFIG_BLOCK
31373d09 1953void laptop_mode_timer_fn(unsigned long data)
1da177e4 1954{
31373d09
MG
1955 struct request_queue *q = (struct request_queue *)data;
1956 int nr_pages = global_page_state(NR_FILE_DIRTY) +
1957 global_page_state(NR_UNSTABLE_NFS);
a06fd6b1
TH
1958 struct bdi_writeback *wb;
1959 struct wb_iter iter;
1da177e4 1960
31373d09
MG
1961 /*
1962 * We want to write everything out, not just down to the dirty
1963 * threshold
1964 */
a06fd6b1
TH
1965 if (!bdi_has_dirty_io(&q->backing_dev_info))
1966 return;
1967
1968 bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0)
1969 if (wb_has_dirty_io(wb))
1970 wb_start_writeback(wb, nr_pages, true,
1971 WB_REASON_LAPTOP_TIMER);
1da177e4
LT
1972}
1973
1974/*
1975 * We've spun up the disk and we're in laptop mode: schedule writeback
1976 * of all dirty data a few seconds from now. If the flush is already scheduled
1977 * then push it back - the user is still using the disk.
1978 */
31373d09 1979void laptop_io_completion(struct backing_dev_info *info)
1da177e4 1980{
31373d09 1981 mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1da177e4
LT
1982}
1983
1984/*
1985 * We're in laptop mode and we've just synced. The sync's writes will have
1986 * caused another writeback to be scheduled by laptop_io_completion.
1987 * Nothing needs to be written back anymore, so we unschedule the writeback.
1988 */
1989void laptop_sync_completion(void)
1990{
31373d09
MG
1991 struct backing_dev_info *bdi;
1992
1993 rcu_read_lock();
1994
1995 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
1996 del_timer(&bdi->laptop_mode_wb_timer);
1997
1998 rcu_read_unlock();
1da177e4 1999}
c2c4986e 2000#endif
1da177e4
LT
2001
2002/*
2003 * If ratelimit_pages is too high then we can get into dirty-data overload
2004 * if a large number of processes all perform writes at the same time.
2005 * If it is too low then SMP machines will call the (expensive)
2006 * get_writeback_state too often.
2007 *
2008 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
2009 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
9d823e8f 2010 * thresholds.
1da177e4
LT
2011 */
2012
2d1d43f6 2013void writeback_set_ratelimit(void)
1da177e4 2014{
dcc25ae7 2015 struct wb_domain *dom = &global_wb_domain;
9d823e8f
WF
2016 unsigned long background_thresh;
2017 unsigned long dirty_thresh;
dcc25ae7 2018
9d823e8f 2019 global_dirty_limits(&background_thresh, &dirty_thresh);
dcc25ae7 2020 dom->dirty_limit = dirty_thresh;
9d823e8f 2021 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1da177e4
LT
2022 if (ratelimit_pages < 16)
2023 ratelimit_pages = 16;
1da177e4
LT
2024}
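/*
 * Worked example (illustrative numbers only): with dirty_thresh ==
 * 51200 pages and 4 online CPUs, ratelimit_pages == 51200 / (4 * 32)
 * == 400, so each CPU revisits the dirty state at least every 400
 * dirtied pages; even with all 4 CPUs dirtying at once the overshoot
 * is bounded by 4 * 400 == 1600 pages == 1/32 of dirty_thresh.
 */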
2025
0db0628d 2026static int
2f60d628
SB
2027ratelimit_handler(struct notifier_block *self, unsigned long action,
2028 void *hcpu)
1da177e4 2029{
2f60d628
SB
2030
2031 switch (action & ~CPU_TASKS_FROZEN) {
2032 case CPU_ONLINE:
2033 case CPU_DEAD:
2034 writeback_set_ratelimit();
2035 return NOTIFY_OK;
2036 default:
2037 return NOTIFY_DONE;
2038 }
1da177e4
LT
2039}
2040
0db0628d 2041static struct notifier_block ratelimit_nb = {
1da177e4
LT
2042 .notifier_call = ratelimit_handler,
2043 .next = NULL,
2044};
2045
2046/*
dc6e29da
LT
2047 * Called early on to tune the page writeback dirty limits.
2048 *
2049 * We used to scale dirty pages according to how total memory
2050 * related to pages that could be allocated for buffers (by
2051 * comparing nr_free_buffer_pages() to vm_total_pages).
2052 *
2053 * However, that was when we used "dirty_ratio" to scale with
2054 * all memory, and we don't do that any more. "dirty_ratio"
2055 * is now applied to total non-HIGHPAGE memory (by subtracting
2056 * totalhigh_pages from vm_total_pages), and as such we can't
2057 * get into the old insane situation any more where we had
2058 * large amounts of dirty pages compared to a small amount of
2059 * non-HIGHMEM memory.
2060 *
2061 * But we might still want to scale the dirty_ratio by how
2062 * much memory the box has.
1da177e4
LT
2063 */
2064void __init page_writeback_init(void)
2065{
a50fcb51
RV
2066 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2067
2d1d43f6 2068 writeback_set_ratelimit();
1da177e4
LT
2069 register_cpu_notifier(&ratelimit_nb);
2070}
2071
f446daae
JK
2072/**
2073 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
2074 * @mapping: address space structure to write
2075 * @start: starting page index
2076 * @end: ending page index (inclusive)
2077 *
2078 * This function scans the page range from @start to @end (inclusive) and tags
2079 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
2080 * that write_cache_pages (or whoever calls this function) will then use
2081 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
2082 * used to avoid livelocking of writeback by a process steadily creating new
2083 * dirty pages in the file (thus it is important for this function to be quick
2084 * so that it can tag pages faster than a dirtying process can create them).
2085 */
2086/*
2087 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
2088 */
f446daae
JK
2089void tag_pages_for_writeback(struct address_space *mapping,
2090 pgoff_t start, pgoff_t end)
2091{
3c111a07 2092#define WRITEBACK_TAG_BATCH 4096
f446daae
JK
2093 unsigned long tagged;
2094
2095 do {
2096 spin_lock_irq(&mapping->tree_lock);
2097 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
2098 &start, end, WRITEBACK_TAG_BATCH,
2099 PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
2100 spin_unlock_irq(&mapping->tree_lock);
2101 WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
2102 cond_resched();
d5ed3a4a
JK
2103 /* We check 'start' to handle wrapping when end == ~0UL */
2104 } while (tagged >= WRITEBACK_TAG_BATCH && start);
f446daae
JK
2105}
2106EXPORT_SYMBOL(tag_pages_for_writeback);
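/*
 * Illustrative pattern (a sketch; write_cache_pages() below is the
 * canonical user): a writepages implementation doing its own page
 * iteration would tag first and then only look up TOWRITE-tagged
 * pages:
 *
 *	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 *		tag_pages_for_writeback(mapping, index, end);
 *	... then iterate pages tagged PAGECACHE_TAG_TOWRITE ...
 */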
2107
811d736f 2108/**
0ea97180 2109 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
811d736f
DH
2110 * @mapping: address space structure to write
2111 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
0ea97180
MS
2112 * @writepage: function called for each page
2113 * @data: data passed to writepage function
811d736f 2114 *
0ea97180 2115 * If a page is already under I/O, write_cache_pages() skips it, even
811d736f
DH
2116 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
2117 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
2118 * and msync() need to guarantee that all the data which was dirty at the time
2119 * the call was made get new I/O started against them. If wbc->sync_mode is
2120 * WB_SYNC_ALL then we were called for data integrity and we must wait for
2121 * existing IO to complete.
f446daae
JK
2122 *
2123 * To avoid livelocks (when other process dirties new pages), we first tag
2124 * pages which should be written back with TOWRITE tag and only then start
2125 * writing them. For data-integrity sync we have to be careful so that we do
2126 * not miss some pages (e.g., because some other process has cleared TOWRITE
2127 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
2128 * by the process clearing the DIRTY tag (and submitting the page for IO).
811d736f 2129 */
0ea97180
MS
2130int write_cache_pages(struct address_space *mapping,
2131 struct writeback_control *wbc, writepage_t writepage,
2132 void *data)
811d736f 2133{
811d736f
DH
2134 int ret = 0;
2135 int done = 0;
811d736f
DH
2136 struct pagevec pvec;
2137 int nr_pages;
31a12666 2138 pgoff_t uninitialized_var(writeback_index);
811d736f
DH
2139 pgoff_t index;
2140 pgoff_t end; /* Inclusive */
bd19e012 2141 pgoff_t done_index;
31a12666 2142 int cycled;
811d736f 2143 int range_whole = 0;
f446daae 2144 int tag;
811d736f 2145
811d736f
DH
2146 pagevec_init(&pvec, 0);
2147 if (wbc->range_cyclic) {
31a12666
NP
2148 writeback_index = mapping->writeback_index; /* prev offset */
2149 index = writeback_index;
2150 if (index == 0)
2151 cycled = 1;
2152 else
2153 cycled = 0;
811d736f
DH
2154 end = -1;
2155 } else {
2156 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2157 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2158 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2159 range_whole = 1;
31a12666 2160 cycled = 1; /* ignore range_cyclic tests */
811d736f 2161 }
6e6938b6 2162 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f446daae
JK
2163 tag = PAGECACHE_TAG_TOWRITE;
2164 else
2165 tag = PAGECACHE_TAG_DIRTY;
811d736f 2166retry:
6e6938b6 2167 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
f446daae 2168 tag_pages_for_writeback(mapping, index, end);
bd19e012 2169 done_index = index;
5a3d5c98
NP
2170 while (!done && (index <= end)) {
2171 int i;
2172
f446daae 2173 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
5a3d5c98
NP
2174 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2175 if (nr_pages == 0)
2176 break;
811d736f 2177
811d736f
DH
2178 for (i = 0; i < nr_pages; i++) {
2179 struct page *page = pvec.pages[i];
2180
2181 /*
d5482cdf
NP
2182 * At this point, the page may be truncated or
2183 * invalidated (changing page->mapping to NULL), or
2184 * even swizzled back from swapper_space to tmpfs file
2185 * mapping. However, page->index will not change
2186 * because we have a reference on the page.
811d736f 2187 */
d5482cdf
NP
2188 if (page->index > end) {
2189 /*
2190 * can't be range_cyclic (1st pass) because
2191 * end == -1 in that case.
2192 */
2193 done = 1;
2194 break;
2195 }
2196
cf15b07c 2197 done_index = page->index;
d5482cdf 2198
811d736f
DH
2199 lock_page(page);
2200
5a3d5c98
NP
2201 /*
2202 * Page truncated or invalidated. We can freely skip it
2203 * then, even for data integrity operations: the page
2204 * has disappeared concurrently, so there could be no
2205 * real expectation of this data integrity operation
2206 * even if there is now a new, dirty page at the same
2207 * pagecache address.
2208 */
811d736f 2209 if (unlikely(page->mapping != mapping)) {
5a3d5c98 2210continue_unlock:
811d736f
DH
2211 unlock_page(page);
2212 continue;
2213 }
2214
515f4a03
NP
2215 if (!PageDirty(page)) {
2216 /* someone wrote it for us */
2217 goto continue_unlock;
2218 }
2219
2220 if (PageWriteback(page)) {
2221 if (wbc->sync_mode != WB_SYNC_NONE)
2222 wait_on_page_writeback(page);
2223 else
2224 goto continue_unlock;
2225 }
811d736f 2226
515f4a03
NP
2227 BUG_ON(PageWriteback(page));
2228 if (!clear_page_dirty_for_io(page))
5a3d5c98 2229 goto continue_unlock;
811d736f 2230
de1414a6 2231 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
0ea97180 2232 ret = (*writepage)(page, wbc, data);
00266770
NP
2233 if (unlikely(ret)) {
2234 if (ret == AOP_WRITEPAGE_ACTIVATE) {
2235 unlock_page(page);
2236 ret = 0;
2237 } else {
2238 /*
2239 * done_index is set past this page,
2240 * so media errors will not choke
2241 * background writeout for the entire
2242 * file. This has consequences for
2243 * range_cyclic semantics (ie. it may
2244 * not be suitable for data integrity
2245 * writeout).
2246 */
cf15b07c 2247 done_index = page->index + 1;
00266770
NP
2248 done = 1;
2249 break;
2250 }
0b564927 2251 }
00266770 2252
546a1924
DC
2253 /*
2254 * We stop writing back only if we are not doing
2255 * integrity sync. In case of integrity sync we have to
2256 * keep going until we have written all the pages
2257 * we tagged for writeback prior to entering this loop.
2258 */
2259 if (--wbc->nr_to_write <= 0 &&
2260 wbc->sync_mode == WB_SYNC_NONE) {
2261 done = 1;
2262 break;
05fe478d 2263 }
811d736f
DH
2264 }
2265 pagevec_release(&pvec);
2266 cond_resched();
2267 }
3a4c6800 2268 if (!cycled && !done) {
811d736f 2269 /*
31a12666 2270 * range_cyclic:
811d736f
DH
2271 * We hit the last page and there is more work to be done: wrap
2272 * back to the start of the file
2273 */
31a12666 2274 cycled = 1;
811d736f 2275 index = 0;
31a12666 2276 end = writeback_index - 1;
811d736f
DH
2277 goto retry;
2278 }
0b564927
DC
2279 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2280 mapping->writeback_index = done_index;
06d6cf69 2281
811d736f
DH
2282 return ret;
2283}
0ea97180
MS
2284EXPORT_SYMBOL(write_cache_pages);
2285
2286/*
2287 * Function used by generic_writepages to call the real writepage
2288 * function and set the mapping flags on error
2289 */
2290static int __writepage(struct page *page, struct writeback_control *wbc,
2291 void *data)
2292{
2293 struct address_space *mapping = data;
2294 int ret = mapping->a_ops->writepage(page, wbc);
2295 mapping_set_error(mapping, ret);
2296 return ret;
2297}
2298
2299/**
2300 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
2301 * @mapping: address space structure to write
2302 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
2303 *
2304 * This is a library function, which implements the writepages()
2305 * address_space_operation.
2306 */
2307int generic_writepages(struct address_space *mapping,
2308 struct writeback_control *wbc)
2309{
9b6096a6
SL
2310 struct blk_plug plug;
2311 int ret;
2312
0ea97180
MS
2313 /* deal with chardevs and other special file */
2314 if (!mapping->a_ops->writepage)
2315 return 0;
2316
9b6096a6
SL
2317 blk_start_plug(&plug);
2318 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2319 blk_finish_plug(&plug);
2320 return ret;
0ea97180 2321}
811d736f
DH
2322
2323EXPORT_SYMBOL(generic_writepages);
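/*
 * Illustrative wiring (hypothetical filesystem, not part of this
 * file): a simple filesystem with a working ->writepage can back its
 * ->writepages with generic_writepages:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.writepage	= myfs_writepage,
 *		.writepages	= generic_writepages,
 *		...
 *	};
 */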
2324
1da177e4
LT
2325int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
2326{
22905f77
AM
2327 int ret;
2328
1da177e4
LT
2329 if (wbc->nr_to_write <= 0)
2330 return 0;
2331 if (mapping->a_ops->writepages)
d08b3851 2332 ret = mapping->a_ops->writepages(mapping, wbc);
22905f77
AM
2333 else
2334 ret = generic_writepages(mapping, wbc);
22905f77 2335 return ret;
1da177e4
LT
2336}
2337
2338/**
2339 * write_one_page - write out a single page and optionally wait on I/O
67be2dd1
MW
2340 * @page: the page to write
2341 * @wait: if true, wait on writeout
1da177e4
LT
2342 *
2343 * The page must be locked by the caller and will be unlocked upon return.
2344 *
2345 * write_one_page() returns a negative error code if I/O failed.
2346 */
2347int write_one_page(struct page *page, int wait)
2348{
2349 struct address_space *mapping = page->mapping;
2350 int ret = 0;
2351 struct writeback_control wbc = {
2352 .sync_mode = WB_SYNC_ALL,
2353 .nr_to_write = 1,
2354 };
2355
2356 BUG_ON(!PageLocked(page));
2357
2358 if (wait)
2359 wait_on_page_writeback(page);
2360
2361 if (clear_page_dirty_for_io(page)) {
2362 page_cache_get(page);
2363 ret = mapping->a_ops->writepage(page, &wbc);
2364 if (ret == 0 && wait) {
2365 wait_on_page_writeback(page);
2366 if (PageError(page))
2367 ret = -EIO;
2368 }
2369 page_cache_release(page);
2370 } else {
2371 unlock_page(page);
2372 }
2373 return ret;
2374}
2375EXPORT_SYMBOL(write_one_page);
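/*
 * Illustrative caller (hypothetical helper, not part of this file):
 * synchronously flushing a just-modified page looks roughly like:
 *
 *	lock_page(page);
 *	... modify page contents and mark it dirty ...
 *	err = write_one_page(page, 1);	(returns with page unlocked)
 */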
2376
76719325
KC
2377/*
2378 * For address_spaces which do not use buffers nor write back.
2379 */
2380int __set_page_dirty_no_writeback(struct page *page)
2381{
2382 if (!PageDirty(page))
c3f0da63 2383 return !TestSetPageDirty(page);
76719325
KC
2384 return 0;
2385}
2386
e3a7cca1
ES
2387/*
2388 * Helper function for set_page_dirty family.
c4843a75
GT
2389 *
2390 * Caller must hold mem_cgroup_begin_page_stat().
2391 *
e3a7cca1
ES
2392 * NOTE: This relies on being atomic wrt interrupts.
2393 */
c4843a75
GT
2394void account_page_dirtied(struct page *page, struct address_space *mapping,
2395 struct mem_cgroup *memcg)
e3a7cca1 2396{
52ebea74
TH
2397 struct inode *inode = mapping->host;
2398
9fb0a7da
TH
2399 trace_writeback_dirty_page(page, mapping);
2400
e3a7cca1 2401 if (mapping_cap_account_dirty(mapping)) {
52ebea74 2402 struct bdi_writeback *wb;
de1414a6 2403
52ebea74
TH
2404 inode_attach_wb(inode, page);
2405 wb = inode_to_wb(inode);
de1414a6 2406
c4843a75 2407 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
e3a7cca1 2408 __inc_zone_page_state(page, NR_FILE_DIRTY);
ea941f0e 2409 __inc_zone_page_state(page, NR_DIRTIED);
52ebea74
TH
2410 __inc_wb_stat(wb, WB_RECLAIMABLE);
2411 __inc_wb_stat(wb, WB_DIRTIED);
e3a7cca1 2412 task_io_account_write(PAGE_CACHE_SIZE);
d3bc1fef
WF
2413 current->nr_dirtied++;
2414 this_cpu_inc(bdp_ratelimits);
e3a7cca1
ES
2415 }
2416}
679ceace 2417EXPORT_SYMBOL(account_page_dirtied);
e3a7cca1 2418
b9ea2515
KK
2419/*
2420 * Helper function for deaccounting dirty page without writeback.
2421 *
c4843a75 2422 * Caller must hold mem_cgroup_begin_page_stat().
b9ea2515 2423 */
c4843a75 2424void account_page_cleaned(struct page *page, struct address_space *mapping,
682aa8e1 2425 struct mem_cgroup *memcg, struct bdi_writeback *wb)
b9ea2515
KK
2426{
2427 if (mapping_cap_account_dirty(mapping)) {
c4843a75 2428 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
b9ea2515 2429 dec_zone_page_state(page, NR_FILE_DIRTY);
682aa8e1 2430 dec_wb_stat(wb, WB_RECLAIMABLE);
b9ea2515
KK
2431 task_io_account_cancelled_write(PAGE_CACHE_SIZE);
2432 }
2433}
b9ea2515 2434
1da177e4
LT
2435/*
2436 * For address_spaces which do not use buffers. Just tag the page as dirty in
2437 * its radix tree.
2438 *
2439 * This is also used when a single buffer is being dirtied: we want to set the
2440 * page dirty in that case, but not all the buffers. This is a "bottom-up"
2441 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
2442 *
2d6d7f98
JW
2443 * The caller must ensure this doesn't race with truncation. Most will simply
2444 * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
2445 * the pte lock held, which also locks out truncation.
1da177e4
LT
2446 */
2447int __set_page_dirty_nobuffers(struct page *page)
2448{
c4843a75
GT
2449 struct mem_cgroup *memcg;
2450
2451 memcg = mem_cgroup_begin_page_stat(page);
1da177e4
LT
2452 if (!TestSetPageDirty(page)) {
2453 struct address_space *mapping = page_mapping(page);
a85d9df1 2454 unsigned long flags;
1da177e4 2455
c4843a75
GT
2456 if (!mapping) {
2457 mem_cgroup_end_page_stat(memcg);
8c08540f 2458 return 1;
c4843a75 2459 }
8c08540f 2460
a85d9df1 2461 spin_lock_irqsave(&mapping->tree_lock, flags);
2d6d7f98
JW
2462 BUG_ON(page_mapping(page) != mapping);
2463 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
c4843a75 2464 account_page_dirtied(page, mapping, memcg);
2d6d7f98
JW
2465 radix_tree_tag_set(&mapping->page_tree, page_index(page),
2466 PAGECACHE_TAG_DIRTY);
a85d9df1 2467 spin_unlock_irqrestore(&mapping->tree_lock, flags);
c4843a75
GT
2468 mem_cgroup_end_page_stat(memcg);
2469
8c08540f
AM
2470 if (mapping->host) {
2471 /* !PageAnon && !swapper_space */
2472 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1da177e4 2473 }
4741c9fd 2474 return 1;
1da177e4 2475 }
c4843a75 2476 mem_cgroup_end_page_stat(memcg);
4741c9fd 2477 return 0;
1da177e4
LT
2478}
2479EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2480
2f800fbd
WF
2481/*
2482 * Call this whenever redirtying a page, to de-account the dirty counters
2483 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2484 * counters (NR_WRITTEN, BDI_WRITTEN) in the long term. The mismatches will lead to
2485 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2486 * control.
2487 */
2488void account_page_redirty(struct page *page)
2489{
2490 struct address_space *mapping = page->mapping;
91018134 2491
2f800fbd 2492 if (mapping && mapping_cap_account_dirty(mapping)) {
682aa8e1
TH
2493 struct inode *inode = mapping->host;
2494 struct bdi_writeback *wb;
2495 bool locked;
91018134 2496
682aa8e1 2497 wb = unlocked_inode_to_wb_begin(inode, &locked);
2f800fbd
WF
2498 current->nr_dirtied--;
2499 dec_zone_page_state(page, NR_DIRTIED);
91018134 2500 dec_wb_stat(wb, WB_DIRTIED);
682aa8e1 2501 unlocked_inode_to_wb_end(inode, locked);
2f800fbd
WF
2502 }
2503}
2504EXPORT_SYMBOL(account_page_redirty);
2505
1da177e4
LT
2506/*
2507 * When a writepage implementation decides that it doesn't want to write this
2508 * page for some reason, it should redirty the locked page via
2509 * redirty_page_for_writepage() and it should then unlock the page and return 0
2510 */
2511int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2512{
8d38633c
KK
2513 int ret;
2514
1da177e4 2515 wbc->pages_skipped++;
8d38633c 2516 ret = __set_page_dirty_nobuffers(page);
2f800fbd 2517 account_page_redirty(page);
8d38633c 2518 return ret;
1da177e4
LT
2519}
2520EXPORT_SYMBOL(redirty_page_for_writepage);
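/*
 * Illustrative sketch (hypothetical ->writepage, not part of this
 * file):
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		if (myfs_must_defer(page)) {	(hypothetical test)
 *			redirty_page_for_writepage(wbc, page);
 *			unlock_page(page);
 *			return 0;
 *		}
 *		...
 *	}
 */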
2521
2522/*
6746aff7
WF
2523 * Dirty a page.
2524 *
2525 * For pages with a mapping this should be done under the page lock
2526 * for the benefit of asynchronous memory errors, which prefer a consistent
2527 * dirty state. This rule can be broken in some special cases,
2528 * but it is better not to.
2529 *
1da177e4
LT
2530 * If the mapping doesn't provide a set_page_dirty a_op, then
2531 * just fall through and assume that it wants buffer_heads.
2532 */
1cf6e7d8 2533int set_page_dirty(struct page *page)
1da177e4
LT
2534{
2535 struct address_space *mapping = page_mapping(page);
2536
2537 if (likely(mapping)) {
2538 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
278df9f4
MK
2539 /*
2540 * readahead/lru_deactivate_page could remain
2541 * PG_readahead/PG_reclaim due to race with end_page_writeback
2542 * About readahead, if the page is written, the flags would be
2543 * reset. So no problem.
2544 * About lru_deactivate_page, if the page is redirty, the flag
2545 * will be reset. So no problem. but if the page is used by readahead
2546 * it will confuse readahead and make it restart the size rampup
2547 * process. But it's a trivial problem.
2548 */
a4bb3ecd
NH
2549 if (PageReclaim(page))
2550 ClearPageReclaim(page);
9361401e
DH
2551#ifdef CONFIG_BLOCK
2552 if (!spd)
2553 spd = __set_page_dirty_buffers;
2554#endif
2555 return (*spd)(page);
1da177e4 2556 }
4741c9fd
AM
2557 if (!PageDirty(page)) {
2558 if (!TestSetPageDirty(page))
2559 return 1;
2560 }
1da177e4
LT
2561 return 0;
2562}
2563EXPORT_SYMBOL(set_page_dirty);
2564
2565/*
2566 * set_page_dirty() is racy if the caller has no reference against
2567 * page->mapping->host, and if the page is unlocked. This is because another
2568 * CPU could truncate the page off the mapping and then free the mapping.
2569 *
2570 * Usually, the page _is_ locked, or the caller is a user-space process which
2571 * holds a reference on the inode by having an open file.
2572 *
2573 * In other cases, the page should be locked before running set_page_dirty().
2574 */
2575int set_page_dirty_lock(struct page *page)
2576{
2577 int ret;
2578
7eaceacc 2579 lock_page(page);
1da177e4
LT
2580 ret = set_page_dirty(page);
2581 unlock_page(page);
2582 return ret;
2583}
2584EXPORT_SYMBOL(set_page_dirty_lock);
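/*
 * Illustrative caller (a sketch, not part of this file): drivers
 * releasing pages they wrote to after get_user_pages() commonly do:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		page_cache_release(pages[i]);
 *	}
 */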
2585
11f81bec
TH
2586/*
2587 * This cancels just the dirty bit on the kernel page itself, it does NOT
2588 * actually remove dirty bits on any mmap's that may be around. It also
2589 * leaves the page tagged dirty, so any sync activity will still find it on
2590 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
2591 * look at the dirty bits in the VM.
2592 *
2593 * Doing this should *normally* only ever be done when a page is truncated,
2594 * and is not actually mapped anywhere at all. However, fs/buffer.c does
2595 * this when it notices that somebody has cleaned out all the buffers on a
2596 * page without actually doing it through the VM. Can you say "ext3 is
2597 * horribly ugly"? Thought you could.
2598 */
2599void cancel_dirty_page(struct page *page)
2600{
c4843a75
GT
2601 struct address_space *mapping = page_mapping(page);
2602
2603 if (mapping_cap_account_dirty(mapping)) {
682aa8e1
TH
2604 struct inode *inode = mapping->host;
2605 struct bdi_writeback *wb;
c4843a75 2606 struct mem_cgroup *memcg;
682aa8e1 2607 bool locked;
c4843a75
GT
2608
2609 memcg = mem_cgroup_begin_page_stat(page);
682aa8e1 2610 wb = unlocked_inode_to_wb_begin(inode, &locked);
c4843a75
GT
2611
2612 if (TestClearPageDirty(page))
682aa8e1 2613 account_page_cleaned(page, mapping, memcg, wb);
c4843a75 2614
682aa8e1 2615 unlocked_inode_to_wb_end(inode, locked);
c4843a75
GT
2616 mem_cgroup_end_page_stat(memcg);
2617 } else {
2618 ClearPageDirty(page);
2619 }
11f81bec
TH
2620}
2621EXPORT_SYMBOL(cancel_dirty_page);
2622
1da177e4
LT
2623/*
2624 * Clear a page's dirty flag, while caring for dirty memory accounting.
2625 * Returns true if the page was previously dirty.
2626 *
2627 * This is for preparing to put the page under writeout. We leave the page
2628 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2629 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
2630 * implementation will run either set_page_writeback() or set_page_dirty(),
2631 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2632 * back into sync.
2633 *
2634 * This incoherency between the page's dirty flag and radix-tree tag is
2635 * unfortunate, but it only exists while the page is locked.
2636 */
2637int clear_page_dirty_for_io(struct page *page)
2638{
2639 struct address_space *mapping = page_mapping(page);
c4843a75 2640 int ret = 0;
1da177e4 2641
79352894
NP
2642 BUG_ON(!PageLocked(page));
2643
7658cc28 2644 if (mapping && mapping_cap_account_dirty(mapping)) {
682aa8e1
TH
2645 struct inode *inode = mapping->host;
2646 struct bdi_writeback *wb;
2647 struct mem_cgroup *memcg;
2648 bool locked;
2649
7658cc28
LT
2650 /*
2651 * Yes, Virginia, this is indeed insane.
2652 *
2653 * We use this sequence to make sure that
2654 * (a) we account for dirty stats properly
2655 * (b) we tell the low-level filesystem to
2656 * mark the whole page dirty if it was
2657 * dirty in a pagetable. Only to then
2658 * (c) clean the page again and return 1 to
2659 * cause the writeback.
2660 *
2661 * This way we avoid all nasty races with the
2662 * dirty bit in multiple places and clearing
2663 * them concurrently from different threads.
2664 *
2665 * Note! Normally the "set_page_dirty(page)"
2666 * has no effect on the actual dirty bit - since
2667 * that will already usually be set. But we
2668 * need the side effects, and it can help us
2669 * avoid races.
2670 *
2671 * We basically use the page "master dirty bit"
2672 * as a serialization point for all the different
2673 * threads doing their things.
7658cc28
LT
2674 */
2675 if (page_mkclean(page))
2676 set_page_dirty(page);
79352894
NP
2677 /*
2678 * We carefully synchronise fault handlers against
2679 * installing a dirty pte and marking the page dirty
2d6d7f98
JW
2680 * at this point. We do this by having them hold the
2681 * page lock while dirtying the page, and pages are
2682 * always locked coming in here, so we get the desired
2683 * exclusion.
79352894 2684 */
c4843a75 2685 memcg = mem_cgroup_begin_page_stat(page);
682aa8e1 2686 wb = unlocked_inode_to_wb_begin(inode, &locked);
7658cc28 2687 if (TestClearPageDirty(page)) {
c4843a75 2688 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
8c08540f 2689 dec_zone_page_state(page, NR_FILE_DIRTY);
682aa8e1 2690 dec_wb_stat(wb, WB_RECLAIMABLE);
c4843a75 2691 ret = 1;
1da177e4 2692 }
682aa8e1 2693 unlocked_inode_to_wb_end(inode, locked);
c4843a75
GT
2694 mem_cgroup_end_page_stat(memcg);
2695 return ret;
1da177e4 2696 }
7658cc28 2697 return TestClearPageDirty(page);
1da177e4 2698}
58bb01a9 2699EXPORT_SYMBOL(clear_page_dirty_for_io);
1da177e4
LT
2700
2701int test_clear_page_writeback(struct page *page)
2702{
2703 struct address_space *mapping = page_mapping(page);
d7365e78 2704 struct mem_cgroup *memcg;
d7365e78 2705 int ret;
1da177e4 2706
6de22619 2707 memcg = mem_cgroup_begin_page_stat(page);
1da177e4 2708 if (mapping) {
91018134
TH
2709 struct inode *inode = mapping->host;
2710 struct backing_dev_info *bdi = inode_to_bdi(inode);
1da177e4
LT
2711 unsigned long flags;
2712
19fd6231 2713 spin_lock_irqsave(&mapping->tree_lock, flags);
1da177e4 2714 ret = TestClearPageWriteback(page);
69cb51d1 2715 if (ret) {
1da177e4
LT
2716 radix_tree_tag_clear(&mapping->page_tree,
2717 page_index(page),
2718 PAGECACHE_TAG_WRITEBACK);
e4ad08fe 2719 if (bdi_cap_account_writeback(bdi)) {
91018134
TH
2720 struct bdi_writeback *wb = inode_to_wb(inode);
2721
2722 __dec_wb_stat(wb, WB_WRITEBACK);
2723 __wb_writeout_inc(wb);
04fbfdc1 2724 }
69cb51d1 2725 }
19fd6231 2726 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1da177e4
LT
2727 } else {
2728 ret = TestClearPageWriteback(page);
2729 }
99b12e3d 2730 if (ret) {
d7365e78 2731 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
d688abf5 2732 dec_zone_page_state(page, NR_WRITEBACK);
99b12e3d
WF
2733 inc_zone_page_state(page, NR_WRITTEN);
2734 }
6de22619 2735 mem_cgroup_end_page_stat(memcg);
1da177e4
LT
2736 return ret;
2737}
2738
1c8349a1 2739int __test_set_page_writeback(struct page *page, bool keep_write)
1da177e4
LT
2740{
2741 struct address_space *mapping = page_mapping(page);
d7365e78 2742 struct mem_cgroup *memcg;
d7365e78 2743 int ret;
1da177e4 2744
6de22619 2745 memcg = mem_cgroup_begin_page_stat(page);
1da177e4 2746 if (mapping) {
91018134
TH
2747 struct inode *inode = mapping->host;
2748 struct backing_dev_info *bdi = inode_to_bdi(inode);
1da177e4
LT
2749 unsigned long flags;
2750
19fd6231 2751 spin_lock_irqsave(&mapping->tree_lock, flags);
1da177e4 2752 ret = TestSetPageWriteback(page);
69cb51d1 2753 if (!ret) {
1da177e4
LT
2754 radix_tree_tag_set(&mapping->page_tree,
2755 page_index(page),
2756 PAGECACHE_TAG_WRITEBACK);
e4ad08fe 2757 if (bdi_cap_account_writeback(bdi))
91018134 2758 __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
69cb51d1 2759 }
1da177e4
LT
2760 if (!PageDirty(page))
2761 radix_tree_tag_clear(&mapping->page_tree,
2762 page_index(page),
2763 PAGECACHE_TAG_DIRTY);
1c8349a1
NJ
2764 if (!keep_write)
2765 radix_tree_tag_clear(&mapping->page_tree,
2766 page_index(page),
2767 PAGECACHE_TAG_TOWRITE);
19fd6231 2768 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1da177e4
LT
2769 } else {
2770 ret = TestSetPageWriteback(page);
2771 }
3a3c02ec 2772 if (!ret) {
d7365e78 2773 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3a3c02ec
JW
2774 inc_zone_page_state(page, NR_WRITEBACK);
2775 }
6de22619 2776 mem_cgroup_end_page_stat(memcg);
1da177e4
LT
2777 return ret;
2778
2779}
1c8349a1 2780EXPORT_SYMBOL(__test_set_page_writeback);
1da177e4
LT
2781
2782/*
00128188 2783 * Return true if any of the pages in the mapping are marked with the
1da177e4
LT
2784 * passed tag.
2785 */
2786int mapping_tagged(struct address_space *mapping, int tag)
2787{
72c47832 2788 return radix_tree_tagged(&mapping->page_tree, tag);
1da177e4
LT
2789}
2790EXPORT_SYMBOL(mapping_tagged);
1d1d1a76
DW
2791
2792/**
2793 * wait_for_stable_page() - wait for writeback to finish, if necessary.
2794 * @page: The page to wait on.
2795 *
2796 * This function determines if the given page is related to a backing device
2797 * that requires page contents to be held stable during writeback. If so, then
2798 * it will wait for any pending writeback to complete.
2799 */
2800void wait_for_stable_page(struct page *page)
2801{
de1414a6
CH
2802 if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
2803 wait_on_page_writeback(page);
1d1d1a76
DW
2804}
2805EXPORT_SYMBOL_GPL(wait_for_stable_page);
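/*
 * Illustrative caller (a sketch; cf. filemap_page_mkwrite()): a
 * fault-time write path typically redirties the page and then waits
 * for stable-page writeback before letting userspace modify it:
 *
 *	lock_page(page);
 *	set_page_dirty(page);
 *	wait_for_stable_page(page);
 *	return VM_FAULT_LOCKED;
 */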