Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
f30c2269 | 2 | * mm/page-writeback.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 2002, Linus Torvalds. | |
04fbfdc1 | 5 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
1da177e4 LT |
6 | * |
7 | * Contains functions related to writing back dirty pages at the | |
8 | * address_space level. | |
9 | * | |
e1f8e874 | 10 | * 10Apr2002 Andrew Morton |
1da177e4 LT |
11 | * Initial version |
12 | */ | |
13 | ||
14 | #include <linux/kernel.h> | |
b95f1b31 | 15 | #include <linux/export.h> |
1da177e4 LT |
16 | #include <linux/spinlock.h> |
17 | #include <linux/fs.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/swap.h> | |
20 | #include <linux/slab.h> | |
21 | #include <linux/pagemap.h> | |
22 | #include <linux/writeback.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/backing-dev.h> | |
55e829af | 25 | #include <linux/task_io_accounting_ops.h> |
1da177e4 LT |
26 | #include <linux/blkdev.h> |
27 | #include <linux/mpage.h> | |
d08b3851 | 28 | #include <linux/rmap.h> |
1da177e4 LT |
29 | #include <linux/percpu.h> |
30 | #include <linux/notifier.h> | |
31 | #include <linux/smp.h> | |
32 | #include <linux/sysctl.h> | |
33 | #include <linux/cpu.h> | |
34 | #include <linux/syscalls.h> | |
ff01bb48 | 35 | #include <linux/buffer_head.h> /* __set_page_dirty_buffers */ |
811d736f | 36 | #include <linux/pagevec.h> |
eb608e3a | 37 | #include <linux/timer.h> |
8bd75c77 | 38 | #include <linux/sched/rt.h> |
6e543d57 | 39 | #include <linux/mm_inline.h> |
028c2dd1 | 40 | #include <trace/events/writeback.h> |
1da177e4 | 41 | |
6e543d57 LD |
42 | #include "internal.h" |
43 | ||
ffd1f609 WF |
44 | /* |
45 | * Sleep at most 200ms at a time in balance_dirty_pages(). | |
46 | */ | |
47 | #define MAX_PAUSE max(HZ/5, 1) | |
48 | ||
5b9b3574 WF |
49 | /* |
50 | * Try to keep balance_dirty_pages() call intervals higher than this many pages | |
51 | * by raising pause time to max_pause when it falls below it.
52 | */ | |
53 | #define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10)) | |
54 | ||
e98be2d5 WF |
55 | /* |
56 | * Estimate write bandwidth at 200ms intervals. | |
57 | */ | |
58 | #define BANDWIDTH_INTERVAL max(HZ/5, 1) | |
59 | ||
6c14ae1e WF |
60 | #define RATELIMIT_CALC_SHIFT 10 |
61 | ||
1da177e4 LT |
62 | /* |
63 | * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited | |
64 | * will look to see if it needs to force writeback or throttling. | |
65 | */ | |
66 | static long ratelimit_pages = 32; | |
67 | ||
1da177e4 LT |
68 | /* The following parameters are exported via /proc/sys/vm */ |
69 | ||
70 | /* | |
5b0830cb | 71 | * Start background writeback (via writeback threads) at this percentage |
1da177e4 | 72 | */ |
1b5e62b4 | 73 | int dirty_background_ratio = 10; |
1da177e4 | 74 | |
2da02997 DR |
75 | /* |
76 | * dirty_background_bytes starts at 0 (disabled) so that it is a function of | |
77 | * dirty_background_ratio * the amount of dirtyable memory | |
78 | */ | |
79 | unsigned long dirty_background_bytes; | |
80 | ||
195cf453 BG |
81 | /* |
82 | * free highmem will not be subtracted from the total free memory | |
83 | * for calculating free ratios if vm_highmem_is_dirtyable is true | |
84 | */ | |
85 | int vm_highmem_is_dirtyable; | |
86 | ||
1da177e4 LT |
87 | /* |
88 | * The generator of dirty data starts writeback at this percentage | |
89 | */ | |
1b5e62b4 | 90 | int vm_dirty_ratio = 20; |
1da177e4 | 91 | |
2da02997 DR |
92 | /* |
93 | * vm_dirty_bytes starts at 0 (disabled) so that it is a function of | |
94 | * vm_dirty_ratio * the amount of dirtyable memory | |
95 | */ | |
96 | unsigned long vm_dirty_bytes; | |
97 | ||
1da177e4 | 98 | /* |
704503d8 | 99 | * The interval between `kupdate'-style writebacks |
1da177e4 | 100 | */ |
22ef37ee | 101 | unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ |
1da177e4 | 102 | |
91913a29 AB |
103 | EXPORT_SYMBOL_GPL(dirty_writeback_interval); |
104 | ||
1da177e4 | 105 | /* |
704503d8 | 106 | * The longest time for which data is allowed to remain dirty |
1da177e4 | 107 | */ |
22ef37ee | 108 | unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ |
1da177e4 LT |
109 | |
110 | /* | |
111 | * Flag that makes the machine dump writes/reads and block dirtyings. | |
112 | */ | |
113 | int block_dump; | |
114 | ||
115 | /* | |
ed5b43f1 BS |
116 | * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies: |
117 | * a full sync is triggered after this time elapses without any disk activity. | |
1da177e4 LT |
118 | */ |
119 | int laptop_mode; | |
120 | ||
121 | EXPORT_SYMBOL(laptop_mode); | |
122 | ||
123 | /* End of sysctl-exported parameters */ | |
124 | ||
c42843f2 | 125 | unsigned long global_dirty_limit; |
1da177e4 | 126 | |
04fbfdc1 PZ |
127 | /* |
128 | * Scale the writeback cache size proportional to the relative writeout speeds. | |
129 | * | |
130 | * We do this by keeping a floating proportion between BDIs, based on page | |
131 | * writeback completions [end_page_writeback()]. Those devices that write out | |
132 | * pages fastest will get the larger share, while the slower will get a smaller | |
133 | * share. | |
134 | * | |
135 | * We use page writeout completions because we are interested in getting rid of | |
136 | * dirty pages. Having them written out is the primary goal. | |
137 | * | |
138 | * We introduce a concept of time, a period over which we measure these events, | |
139 | * because demand can/will vary over time. The length of this period itself is | |
140 | * measured in page writeback completions. | |
141 | * | |
142 | */ | |
eb608e3a JK |
143 | static struct fprop_global writeout_completions; |
144 | ||
145 | static void writeout_period(unsigned long t); | |
146 | /* Timer for aging of writeout_completions */ | |
147 | static struct timer_list writeout_period_timer = | |
148 | TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0); | |
149 | static unsigned long writeout_period_time = 0; | |
150 | ||
151 | /* | |
152 | * Length of period for aging writeout fractions of bdis. This is an | |
153 | * arbitrarily chosen number. The longer the period, the slower fractions will | |
154 | * reflect changes in current writeout rate. | |
155 | */ | |
156 | #define VM_COMPLETIONS_PERIOD_LEN (3*HZ) | |
04fbfdc1 | 157 | |
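To get a feel for the floating proportion described above, here is a small userspace sketch of the same idea (a per-device completion counter plus a global one, aged every period so recent completions dominate). It is only an illustration of the concept with invented names and numbers, not the kernel's fprop implementation:

```c
#include <stdio.h>

/* Toy model of a floating proportion: each device's share of recent
 * writeout completions, aged once per period so old history fades. */
struct dev_prop {
	const char *name;
	unsigned long completions;
};

static unsigned long total_completions;

static void note_completion(struct dev_prop *d)
{
	d->completions++;
	total_completions++;
}

/* Called once per aging period: halve everything, like a decaying average. */
static void age_period(struct dev_prop *devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		devs[i].completions /= 2;
	total_completions /= 2;
}

int main(void)
{
	struct dev_prop devs[2] = { { "fast", 0 }, { "slow", 0 } };
	int i;

	for (i = 0; i < 900; i++)	/* the fast device completes 9x as often */
		note_completion(&devs[0]);
	for (i = 0; i < 100; i++)
		note_completion(&devs[1]);

	for (i = 0; i < 2; i++)		/* prints 90% and 10% */
		printf("%s: %lu%% of recent writeout\n", devs[i].name,
		       100 * devs[i].completions / total_completions);

	age_period(devs, 2);		/* older completions now count half as much */
	return 0;
}
```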
693108a8 TH |
158 | #ifdef CONFIG_CGROUP_WRITEBACK |
159 | ||
160 | static void wb_min_max_ratio(struct bdi_writeback *wb, | |
161 | unsigned long *minp, unsigned long *maxp) | |
162 | { | |
163 | unsigned long this_bw = wb->avg_write_bandwidth; | |
164 | unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth); | |
165 | unsigned long long min = wb->bdi->min_ratio; | |
166 | unsigned long long max = wb->bdi->max_ratio; | |
167 | ||
168 | /* | |
169 | * @wb may already be clean by the time control reaches here and | |
170 | * the total may not include its bw. | |
171 | */ | |
172 | if (this_bw < tot_bw) { | |
173 | if (min) { | |
174 | min *= this_bw; | |
175 | do_div(min, tot_bw); | |
176 | } | |
177 | if (max < 100) { | |
178 | max *= this_bw; | |
179 | do_div(max, tot_bw); | |
180 | } | |
181 | } | |
182 | ||
183 | *minp = min; | |
184 | *maxp = max; | |
185 | } | |
186 | ||
187 | #else /* CONFIG_CGROUP_WRITEBACK */ | |
188 | ||
189 | static void wb_min_max_ratio(struct bdi_writeback *wb, | |
190 | unsigned long *minp, unsigned long *maxp) | |
191 | { | |
192 | *minp = wb->bdi->min_ratio; | |
193 | *maxp = wb->bdi->max_ratio; | |
194 | } | |
195 | ||
196 | #endif /* CONFIG_CGROUP_WRITEBACK */ | |
197 | ||
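A quick worked example of the scaling above (numbers invented for illustration): if a bdi has min_ratio = 10 and max_ratio = 50, and a member wb currently contributes 20 MB/s of the bdi's 80 MB/s total write bandwidth, the values handed back are min = 10 * 20 / 80 = 2 and max = 50 * 20 / 80 = 12, i.e. each wb only receives the share of the bdi's ratios that matches its share of the bandwidth. In the !CONFIG_CGROUP_WRITEBACK case there is a single wb per bdi, so the ratios are returned unscaled.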
a756cf59 JW |
198 | /* |
199 | * In a memory zone, there is a certain amount of pages we consider | |
200 | * available for the page cache, which is essentially the number of | |
201 | * free and reclaimable pages, minus some zone reserves to protect | |
202 | * lowmem and the ability to uphold the zone's watermarks without | |
203 | * requiring writeback. | |
204 | * | |
205 | * This number of dirtyable pages is the base value of which the | |
206 | * user-configurable dirty ratio is the effective number of pages that
207 | * are allowed to be actually dirtied. Per individual zone, or | |
208 | * globally by using the sum of dirtyable pages over all zones. | |
209 | * | |
210 | * Because the user is allowed to specify the dirty limit globally as | |
211 | * absolute number of bytes, calculating the per-zone dirty limit can | |
212 | * require translating the configured limit into a percentage of | |
213 | * global dirtyable memory first. | |
214 | */ | |
215 | ||
a804552b JW |
216 | /** |
217 | * zone_dirtyable_memory - number of dirtyable pages in a zone | |
218 | * @zone: the zone | |
219 | * | |
220 | * Returns the zone's number of pages potentially available for dirty | |
221 | * page cache. This is the base value for the per-zone dirty limits. | |
222 | */ | |
223 | static unsigned long zone_dirtyable_memory(struct zone *zone) | |
224 | { | |
225 | unsigned long nr_pages; | |
226 | ||
227 | nr_pages = zone_page_state(zone, NR_FREE_PAGES); | |
228 | nr_pages -= min(nr_pages, zone->dirty_balance_reserve); | |
229 | ||
a1c3bfb2 JW |
230 | nr_pages += zone_page_state(zone, NR_INACTIVE_FILE); |
231 | nr_pages += zone_page_state(zone, NR_ACTIVE_FILE); | |
a804552b JW |
232 | |
233 | return nr_pages; | |
234 | } | |
235 | ||
1edf2234 JW |
236 | static unsigned long highmem_dirtyable_memory(unsigned long total) |
237 | { | |
238 | #ifdef CONFIG_HIGHMEM | |
239 | int node; | |
240 | unsigned long x = 0; | |
241 | ||
242 | for_each_node_state(node, N_HIGH_MEMORY) { | |
a804552b | 243 | struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; |
1edf2234 | 244 | |
a804552b | 245 | x += zone_dirtyable_memory(z); |
1edf2234 | 246 | } |
c8b74c2f SR |
247 | /* |
248 | * Unreclaimable memory (kernel memory or anonymous memory | |
249 | * without swap) can bring down the dirtyable pages below | |
250 | * the zone's dirty balance reserve and the above calculation | |
251 | * will underflow. However we still want to add in nodes | |
252 | * which are below threshold (negative values) to get a more | |
253 | * accurate calculation but make sure that the total never | |
254 | * underflows. | |
255 | */ | |
256 | if ((long)x < 0) | |
257 | x = 0; | |
258 | ||
1edf2234 JW |
259 | /* |
260 | * Make sure that the number of highmem pages is never larger | |
261 | * than the number of the total dirtyable memory. This can only | |
262 | * occur in very strange VM situations but we want to make sure | |
263 | * that this does not occur. | |
264 | */ | |
265 | return min(x, total); | |
266 | #else | |
267 | return 0; | |
268 | #endif | |
269 | } | |
270 | ||
271 | /** | |
ccafa287 | 272 | * global_dirtyable_memory - number of globally dirtyable pages |
1edf2234 | 273 | * |
ccafa287 JW |
274 | * Returns the global number of pages potentially available for dirty |
275 | * page cache. This is the base value for the global dirty limits. | |
1edf2234 | 276 | */ |
18cf8cf8 | 277 | static unsigned long global_dirtyable_memory(void) |
1edf2234 JW |
278 | { |
279 | unsigned long x; | |
280 | ||
a804552b | 281 | x = global_page_state(NR_FREE_PAGES); |
c8b74c2f | 282 | x -= min(x, dirty_balance_reserve); |
1edf2234 | 283 | |
a1c3bfb2 JW |
284 | x += global_page_state(NR_INACTIVE_FILE); |
285 | x += global_page_state(NR_ACTIVE_FILE); | |
a804552b | 286 | |
1edf2234 JW |
287 | if (!vm_highmem_is_dirtyable) |
288 | x -= highmem_dirtyable_memory(x); | |
289 | ||
290 | return x + 1; /* Ensure that we never return 0 */ | |
291 | } | |
292 | ||
ccafa287 JW |
293 | /* |
294 | * global_dirty_limits - background-writeback and dirty-throttling thresholds | |
295 | * | |
296 | * Calculate the dirty thresholds based on sysctl parameters | |
297 | * - vm.dirty_background_ratio or vm.dirty_background_bytes | |
298 | * - vm.dirty_ratio or vm.dirty_bytes | |
299 | * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and | |
300 | * real-time tasks. | |
301 | */ | |
302 | void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) | |
303 | { | |
9ef0a0ff | 304 | const unsigned long available_memory = global_dirtyable_memory(); |
ccafa287 JW |
305 | unsigned long background; |
306 | unsigned long dirty; | |
ccafa287 JW |
307 | struct task_struct *tsk; |
308 | ||
ccafa287 JW |
309 | if (vm_dirty_bytes) |
310 | dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); | |
311 | else | |
312 | dirty = (vm_dirty_ratio * available_memory) / 100; | |
313 | ||
314 | if (dirty_background_bytes) | |
315 | background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); | |
316 | else | |
317 | background = (dirty_background_ratio * available_memory) / 100; | |
318 | ||
319 | if (background >= dirty) | |
320 | background = dirty / 2; | |
321 | tsk = current; | |
322 | if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { | |
323 | background += background / 4; | |
324 | dirty += dirty / 4; | |
325 | } | |
326 | *pbackground = background; | |
327 | *pdirty = dirty; | |
328 | trace_global_dirty_state(background, dirty); | |
329 | } | |
330 | ||
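A worked example of the threshold arithmetic above (values chosen only for illustration): with global_dirtyable_memory() at 1,000,000 pages and the defaults vm.dirty_ratio = 20 and vm.dirty_background_ratio = 10, the function returns dirty = 200,000 and background = 100,000 pages. If vm.dirty_bytes were set to 1 GiB on a 4 KiB-page machine instead, dirty would become DIV_ROUND_UP(1 GiB, 4 KiB) = 262,144 pages and the ratio would be ignored. A PF_LESS_THROTTLE or real-time caller gets both numbers lifted by a quarter, e.g. 250,000 and 125,000 in the ratio case.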
a756cf59 JW |
331 | /** |
332 | * zone_dirty_limit - maximum number of dirty pages allowed in a zone | |
333 | * @zone: the zone | |
334 | * | |
335 | * Returns the maximum number of dirty pages allowed in a zone, based | |
336 | * on the zone's dirtyable memory. | |
337 | */ | |
338 | static unsigned long zone_dirty_limit(struct zone *zone) | |
339 | { | |
340 | unsigned long zone_memory = zone_dirtyable_memory(zone); | |
341 | struct task_struct *tsk = current; | |
342 | unsigned long dirty; | |
343 | ||
344 | if (vm_dirty_bytes) | |
345 | dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) * | |
346 | zone_memory / global_dirtyable_memory(); | |
347 | else | |
348 | dirty = vm_dirty_ratio * zone_memory / 100; | |
349 | ||
350 | if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) | |
351 | dirty += dirty / 4; | |
352 | ||
353 | return dirty; | |
354 | } | |
355 | ||
356 | /** | |
357 | * zone_dirty_ok - tells whether a zone is within its dirty limits | |
358 | * @zone: the zone to check | |
359 | * | |
360 | * Returns %true when the dirty pages in @zone are within the zone's | |
361 | * dirty limit, %false if the limit is exceeded. | |
362 | */ | |
363 | bool zone_dirty_ok(struct zone *zone) | |
364 | { | |
365 | unsigned long limit = zone_dirty_limit(zone); | |
366 | ||
367 | return zone_page_state(zone, NR_FILE_DIRTY) + | |
368 | zone_page_state(zone, NR_UNSTABLE_NFS) + | |
369 | zone_page_state(zone, NR_WRITEBACK) <= limit; | |
370 | } | |
371 | ||
2da02997 | 372 | int dirty_background_ratio_handler(struct ctl_table *table, int write, |
8d65af78 | 373 | void __user *buffer, size_t *lenp, |
2da02997 DR |
374 | loff_t *ppos) |
375 | { | |
376 | int ret; | |
377 | ||
8d65af78 | 378 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 DR |
379 | if (ret == 0 && write) |
380 | dirty_background_bytes = 0; | |
381 | return ret; | |
382 | } | |
383 | ||
384 | int dirty_background_bytes_handler(struct ctl_table *table, int write, | |
8d65af78 | 385 | void __user *buffer, size_t *lenp, |
2da02997 DR |
386 | loff_t *ppos) |
387 | { | |
388 | int ret; | |
389 | ||
8d65af78 | 390 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 DR |
391 | if (ret == 0 && write) |
392 | dirty_background_ratio = 0; | |
393 | return ret; | |
394 | } | |
395 | ||
04fbfdc1 | 396 | int dirty_ratio_handler(struct ctl_table *table, int write, |
8d65af78 | 397 | void __user *buffer, size_t *lenp, |
04fbfdc1 PZ |
398 | loff_t *ppos) |
399 | { | |
400 | int old_ratio = vm_dirty_ratio; | |
2da02997 DR |
401 | int ret; |
402 | ||
8d65af78 | 403 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
04fbfdc1 | 404 | if (ret == 0 && write && vm_dirty_ratio != old_ratio) { |
eb608e3a | 405 | writeback_set_ratelimit(); |
2da02997 DR |
406 | vm_dirty_bytes = 0; |
407 | } | |
408 | return ret; | |
409 | } | |
410 | ||
2da02997 | 411 | int dirty_bytes_handler(struct ctl_table *table, int write, |
8d65af78 | 412 | void __user *buffer, size_t *lenp, |
2da02997 DR |
413 | loff_t *ppos) |
414 | { | |
fc3501d4 | 415 | unsigned long old_bytes = vm_dirty_bytes; |
2da02997 DR |
416 | int ret; |
417 | ||
8d65af78 | 418 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 | 419 | if (ret == 0 && write && vm_dirty_bytes != old_bytes) { |
eb608e3a | 420 | writeback_set_ratelimit(); |
2da02997 | 421 | vm_dirty_ratio = 0; |
04fbfdc1 PZ |
422 | } |
423 | return ret; | |
424 | } | |
425 | ||
eb608e3a JK |
426 | static unsigned long wp_next_time(unsigned long cur_time) |
427 | { | |
428 | cur_time += VM_COMPLETIONS_PERIOD_LEN; | |
429 | /* 0 has a special meaning... */ | |
430 | if (!cur_time) | |
431 | return 1; | |
432 | return cur_time; | |
433 | } | |
434 | ||
04fbfdc1 PZ |
435 | /* |
436 | * Increment the BDI's writeout completion count and the global writeout | |
437 | * completion count. Called from test_clear_page_writeback(). | |
438 | */ | |
93f78d88 | 439 | static inline void __wb_writeout_inc(struct bdi_writeback *wb) |
04fbfdc1 | 440 | { |
93f78d88 | 441 | __inc_wb_stat(wb, WB_WRITTEN); |
a88a341a | 442 | __fprop_inc_percpu_max(&writeout_completions, &wb->completions, |
93f78d88 | 443 | wb->bdi->max_prop_frac); |
eb608e3a JK |
444 | /* First event after period switching was turned off? */ |
445 | if (!unlikely(writeout_period_time)) { | |
446 | /* | |
447 | * We can race with other __wb_writeout_inc calls here but
448 | * it does not cause any harm since the resulting time when | |
449 | * timer will fire and what is in writeout_period_time will be | |
450 | * roughly the same. | |
451 | */ | |
452 | writeout_period_time = wp_next_time(jiffies); | |
453 | mod_timer(&writeout_period_timer, writeout_period_time); | |
454 | } | |
04fbfdc1 PZ |
455 | } |
456 | ||
93f78d88 | 457 | void wb_writeout_inc(struct bdi_writeback *wb) |
dd5656e5 MS |
458 | { |
459 | unsigned long flags; | |
460 | ||
461 | local_irq_save(flags); | |
93f78d88 | 462 | __wb_writeout_inc(wb); |
dd5656e5 MS |
463 | local_irq_restore(flags); |
464 | } | |
93f78d88 | 465 | EXPORT_SYMBOL_GPL(wb_writeout_inc); |
dd5656e5 | 466 | |
04fbfdc1 PZ |
467 | /* |
468 | * Obtain an accurate fraction of the BDI's portion. | |
469 | */ | |
a88a341a TH |
470 | static void wb_writeout_fraction(struct bdi_writeback *wb, |
471 | long *numerator, long *denominator) | |
04fbfdc1 | 472 | { |
a88a341a | 473 | fprop_fraction_percpu(&writeout_completions, &wb->completions, |
04fbfdc1 | 474 | numerator, denominator); |
04fbfdc1 PZ |
475 | } |
476 | ||
eb608e3a JK |
477 | /* |
478 | * On idle system, we can be called long after we scheduled because we use | |
479 | * deferred timers so count with missed periods. | |
480 | */ | |
481 | static void writeout_period(unsigned long t) | |
482 | { | |
483 | int miss_periods = (jiffies - writeout_period_time) / | |
484 | VM_COMPLETIONS_PERIOD_LEN; | |
485 | ||
486 | if (fprop_new_period(&writeout_completions, miss_periods + 1)) { | |
487 | writeout_period_time = wp_next_time(writeout_period_time + | |
488 | miss_periods * VM_COMPLETIONS_PERIOD_LEN); | |
489 | mod_timer(&writeout_period_timer, writeout_period_time); | |
490 | } else { | |
491 | /* | |
492 | * Aging has zeroed all fractions. Stop wasting CPU on period | |
493 | * updates. | |
494 | */ | |
495 | writeout_period_time = 0; | |
496 | } | |
497 | } | |
498 | ||
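As a concrete (hypothetical) timeline for the deferred timer above: with VM_COMPLETIONS_PERIOD_LEN at 3*HZ, if an idle system lets the timer fire only 10 seconds after writeout_period_time, miss_periods evaluates to 3 and fprop_new_period() is asked to age the fractions by 4 periods at once, so the decay ends up roughly the same as if the timer had fired on schedule.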
189d3c4a | 499 | /* |
d08c429b JW |
500 | * bdi_min_ratio keeps the sum of the minimum dirty shares of all |
501 | * registered backing devices, which, for obvious reasons, can not | |
502 | * exceed 100%. | |
189d3c4a | 503 | */ |
189d3c4a PZ |
504 | static unsigned int bdi_min_ratio; |
505 | ||
506 | int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) | |
507 | { | |
508 | int ret = 0; | |
189d3c4a | 509 | |
cfc4ba53 | 510 | spin_lock_bh(&bdi_lock); |
a42dde04 | 511 | if (min_ratio > bdi->max_ratio) { |
189d3c4a | 512 | ret = -EINVAL; |
a42dde04 PZ |
513 | } else { |
514 | min_ratio -= bdi->min_ratio; | |
515 | if (bdi_min_ratio + min_ratio < 100) { | |
516 | bdi_min_ratio += min_ratio; | |
517 | bdi->min_ratio += min_ratio; | |
518 | } else { | |
519 | ret = -EINVAL; | |
520 | } | |
521 | } | |
cfc4ba53 | 522 | spin_unlock_bh(&bdi_lock); |
a42dde04 PZ |
523 | |
524 | return ret; | |
525 | } | |
526 | ||
527 | int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio) | |
528 | { | |
a42dde04 PZ |
529 | int ret = 0; |
530 | ||
531 | if (max_ratio > 100) | |
532 | return -EINVAL; | |
533 | ||
cfc4ba53 | 534 | spin_lock_bh(&bdi_lock); |
a42dde04 PZ |
535 | if (bdi->min_ratio > max_ratio) { |
536 | ret = -EINVAL; | |
537 | } else { | |
538 | bdi->max_ratio = max_ratio; | |
eb608e3a | 539 | bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100; |
a42dde04 | 540 | } |
cfc4ba53 | 541 | spin_unlock_bh(&bdi_lock); |
189d3c4a PZ |
542 | |
543 | return ret; | |
544 | } | |
a42dde04 | 545 | EXPORT_SYMBOL(bdi_set_max_ratio); |
189d3c4a | 546 | |
6c14ae1e WF |
547 | static unsigned long dirty_freerun_ceiling(unsigned long thresh, |
548 | unsigned long bg_thresh) | |
549 | { | |
550 | return (thresh + bg_thresh) / 2; | |
551 | } | |
552 | ||
ffd1f609 WF |
553 | static unsigned long hard_dirty_limit(unsigned long thresh) |
554 | { | |
555 | return max(thresh, global_dirty_limit); | |
556 | } | |
557 | ||
6f718656 | 558 | /** |
a88a341a TH |
559 | * wb_dirty_limit - @wb's share of dirty throttling threshold |
560 | * @wb: bdi_writeback to query | |
6f718656 | 561 | * @dirty: global dirty limit in pages |
1babe183 | 562 | * |
a88a341a | 563 | * Returns @wb's dirty limit in pages. The term "dirty" in the context of |
6f718656 | 564 | * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. |
aed21ad2 WF |
565 | * |
566 | * Note that balance_dirty_pages() will only seriously take it as a hard limit | |
567 | * when sleeping max_pause per page is not enough to keep the dirty pages under | |
568 | * control. For example, when the device is completely stalled due to some error | |
569 | * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key. | |
570 | * In the other normal situations, it acts more gently by throttling the tasks | |
a88a341a | 571 | * more (rather than completely block them) when the wb dirty pages go high. |
1babe183 | 572 | * |
6f718656 | 573 | * It allocates high/low dirty limits to fast/slow devices, in order to prevent |
1babe183 WF |
574 | * - starving fast devices |
575 | * - piling up dirty pages (that will take long time to sync) on slow devices | |
576 | * | |
a88a341a | 577 | * The wb's share of dirty limit will be adapting to its throughput and |
1babe183 WF |
578 | * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. |
579 | */ | |
a88a341a | 580 | unsigned long wb_dirty_limit(struct bdi_writeback *wb, unsigned long dirty) |
16c4042f | 581 | { |
a88a341a | 582 | u64 wb_dirty; |
16c4042f | 583 | long numerator, denominator; |
693108a8 | 584 | unsigned long wb_min_ratio, wb_max_ratio; |
04fbfdc1 | 585 | |
16c4042f WF |
586 | /* |
587 | * Calculate this BDI's share of the dirty ratio. | |
588 | */ | |
a88a341a | 589 | wb_writeout_fraction(wb, &numerator, &denominator); |
04fbfdc1 | 590 | |
a88a341a TH |
591 | wb_dirty = (dirty * (100 - bdi_min_ratio)) / 100; |
592 | wb_dirty *= numerator; | |
593 | do_div(wb_dirty, denominator); | |
04fbfdc1 | 594 | |
693108a8 TH |
595 | wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio); |
596 | ||
597 | wb_dirty += (dirty * wb_min_ratio) / 100; | |
598 | if (wb_dirty > (dirty * wb_max_ratio) / 100) | |
599 | wb_dirty = dirty * wb_max_ratio / 100; | |
16c4042f | 600 | |
a88a341a | 601 | return wb_dirty; |
1da177e4 LT |
602 | } |
603 | ||
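An illustrative run of the share computation above (all numbers invented, assuming a single wb per bdi): with a global dirty limit of 100,000 pages, this bdi the only one holding a minimum (min_ratio = 20, so bdi_min_ratio = 20), max_ratio = 80, and a wb whose recent writeout fraction is 1/2, the proportional share is 100,000 * 80/100 * 1/2 = 40,000 pages; adding the guaranteed minimum 100,000 * 20/100 = 20,000 gives 60,000 pages, comfortably under the 80,000-page cap imposed by max_ratio.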
5a537485 MP |
604 | /* |
605 | *
606 | * f(dirty) := 1.0 + ((setpoint - dirty) / (limit - setpoint))^3
607 | *
608 | *
609 | * it's a 3rd order polynomial that is subject to
610 | * | |
611 | * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast | |
612 | * (2) f(setpoint) = 1.0 => the balance point | |
613 | * (3) f(limit) = 0 => the hard limit | |
614 | * (4) df/dx <= 0 => negative feedback control | |
615 | * (5) the closer to setpoint, the smaller |df/dx| (and the reverse) | |
616 | * => fast response on large errors; small oscillation near setpoint | |
617 | */ | |
d5c9fde3 | 618 | static long long pos_ratio_polynom(unsigned long setpoint, |
5a537485 MP |
619 | unsigned long dirty, |
620 | unsigned long limit) | |
621 | { | |
622 | long long pos_ratio; | |
623 | long x; | |
624 | ||
d5c9fde3 | 625 | x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, |
5a537485 MP |
626 | limit - setpoint + 1); |
627 | pos_ratio = x; | |
628 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | |
629 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | |
630 | pos_ratio += 1 << RATELIMIT_CALC_SHIFT; | |
631 | ||
632 | return clamp(pos_ratio, 0LL, 2LL << RATELIMIT_CALC_SHIFT); | |
633 | } | |
634 | ||
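A floating-point reference of the same curve can be handy when checking the fixed-point arithmetic above. The sketch below is for experimentation only (doubles instead of RATELIMIT_CALC_SHIFT scaling, invented sample numbers), not kernel code:

```c
#include <stdio.h>

/* Double-precision version of the 3rd order control curve:
 * f(dirty) = 1 + ((setpoint - dirty) / (limit - setpoint))^3, clamped to [0, 2]. */
static double pos_ratio_ref(double setpoint, double dirty, double limit)
{
	double x = (setpoint - dirty) / (limit - setpoint);
	double f = 1.0 + x * x * x;

	if (f < 0.0)
		return 0.0;
	if (f > 2.0)
		return 2.0;
	return f;
}

int main(void)
{
	double freerun = 1000, limit = 3000;
	double setpoint = (freerun + limit) / 2;	/* 2000 */

	/* The three anchor points named in the comment above. */
	printf("f(freerun)  = %.2f\n", pos_ratio_ref(setpoint, freerun, limit));  /* 2.00 */
	printf("f(setpoint) = %.2f\n", pos_ratio_ref(setpoint, setpoint, limit)); /* 1.00 */
	printf("f(limit)    = %.2f\n", pos_ratio_ref(setpoint, limit, limit));    /* 0.00 */
	return 0;
}
```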
6c14ae1e WF |
635 | /* |
636 | * Dirty position control. | |
637 | * | |
638 | * (o) global/bdi setpoints | |
639 | * | |
de1fff37 | 640 | * We want the dirty pages be balanced around the global/wb setpoints. |
6c14ae1e WF |
641 | * When the number of dirty pages is higher/lower than the setpoint, the |
642 | * dirty position control ratio (and hence task dirty ratelimit) will be | |
643 | * decreased/increased to bring the dirty pages back to the setpoint. | |
644 | * | |
645 | * pos_ratio = 1 << RATELIMIT_CALC_SHIFT | |
646 | * | |
647 | * if (dirty < setpoint) scale up pos_ratio | |
648 | * if (dirty > setpoint) scale down pos_ratio | |
649 | * | |
de1fff37 TH |
650 | * if (wb_dirty < wb_setpoint) scale up pos_ratio |
651 | * if (wb_dirty > wb_setpoint) scale down pos_ratio | |
6c14ae1e WF |
652 | * |
653 | * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT | |
654 | * | |
655 | * (o) global control line | |
656 | * | |
657 | * ^ pos_ratio | |
658 | * | | |
659 | * | |<===== global dirty control scope ======>| | |
660 | * 2.0 .............* | |
661 | * | .* | |
662 | * | . * | |
663 | * | . * | |
664 | * | . * | |
665 | * | . * | |
666 | * | . * | |
667 | * 1.0 ................................* | |
668 | * | . . * | |
669 | * | . . * | |
670 | * | . . * | |
671 | * | . . * | |
672 | * | . . * | |
673 | * 0 +------------.------------------.----------------------*-------------> | |
674 | * freerun^ setpoint^ limit^ dirty pages | |
675 | * | |
de1fff37 | 676 | * (o) wb control line |
6c14ae1e WF |
677 | * |
678 | * ^ pos_ratio | |
679 | * | | |
680 | * | * | |
681 | * | * | |
682 | * | * | |
683 | * | * | |
684 | * | * |<=========== span ============>| | |
685 | * 1.0 .......................* | |
686 | * | . * | |
687 | * | . * | |
688 | * | . * | |
689 | * | . * | |
690 | * | . * | |
691 | * | . * | |
692 | * | . * | |
693 | * | . * | |
694 | * | . * | |
695 | * | . * | |
696 | * | . * | |
697 | * 1/4 ...............................................* * * * * * * * * * * * | |
698 | * | . . | |
699 | * | . . | |
700 | * | . . | |
701 | * 0 +----------------------.-------------------------------.-------------> | |
de1fff37 | 702 | * wb_setpoint^ x_intercept^ |
6c14ae1e | 703 | * |
de1fff37 | 704 | * The wb control line won't drop below pos_ratio=1/4, so that wb_dirty can |
6c14ae1e WF |
705 | * be smoothly throttled down to normal if it starts high in situations like |
706 | * - start writing to a slow SD card and a fast disk at the same time. The SD | |
de1fff37 TH |
707 | * card's wb_dirty may rush to many times higher than wb_setpoint. |
708 | * - the wb dirty thresh drops quickly due to change of JBOD workload | |
6c14ae1e | 709 | */ |
a88a341a TH |
710 | static unsigned long wb_position_ratio(struct bdi_writeback *wb, |
711 | unsigned long thresh, | |
712 | unsigned long bg_thresh, | |
713 | unsigned long dirty, | |
de1fff37 TH |
714 | unsigned long wb_thresh, |
715 | unsigned long wb_dirty) | |
6c14ae1e | 716 | { |
a88a341a | 717 | unsigned long write_bw = wb->avg_write_bandwidth; |
6c14ae1e WF |
718 | unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); |
719 | unsigned long limit = hard_dirty_limit(thresh); | |
720 | unsigned long x_intercept; | |
721 | unsigned long setpoint; /* dirty pages' target balance point */ | |
de1fff37 | 722 | unsigned long wb_setpoint; |
6c14ae1e WF |
723 | unsigned long span; |
724 | long long pos_ratio; /* for scaling up/down the rate limit */ | |
725 | long x; | |
726 | ||
727 | if (unlikely(dirty >= limit)) | |
728 | return 0; | |
729 | ||
730 | /* | |
731 | * global setpoint | |
732 | * | |
5a537485 MP |
733 | * See comment for pos_ratio_polynom(). |
734 | */ | |
735 | setpoint = (freerun + limit) / 2; | |
736 | pos_ratio = pos_ratio_polynom(setpoint, dirty, limit); | |
737 | ||
738 | /* | |
739 | * The strictlimit feature is a tool preventing mistrusted filesystems | |
740 | * from growing a large number of dirty pages before throttling. For | |
de1fff37 TH |
741 | * such filesystems balance_dirty_pages always checks wb counters |
742 | * against wb limits. Even if global "nr_dirty" is under "freerun". | |
5a537485 MP |
743 | * This is especially important for fuse which sets bdi->max_ratio to |
744 | * 1% by default. Without strictlimit feature, fuse writeback may | |
745 | * consume arbitrary amount of RAM because it is accounted in | |
746 | * NR_WRITEBACK_TEMP which is not involved in calculating "nr_dirty". | |
6c14ae1e | 747 | * |
a88a341a | 748 | * Here, in wb_position_ratio(), we calculate pos_ratio based on |
de1fff37 | 749 | * two values: wb_dirty and wb_thresh. Let's consider an example: |
5a537485 MP |
750 | * total amount of RAM is 16GB, bdi->max_ratio is equal to 1%, global |
751 | * limits are set by default to 10% and 20% (background and throttle). | |
de1fff37 TH |
752 | * Then wb_thresh is 1% of 20% of 16GB. This amounts to ~8K pages. |
753 | * wb_dirty_limit(wb, bg_thresh) is about ~4K pages. wb_setpoint is | |
754 | * about ~6K pages (as the average of background and throttle wb | |
5a537485 | 755 | * limits). The 3rd order polynomial will provide positive feedback if |
de1fff37 | 756 | * wb_dirty is under wb_setpoint and vice versa. |
6c14ae1e | 757 | * |
5a537485 | 758 | * Note, that we cannot use global counters in these calculations |
de1fff37 | 759 | * because we want to throttle process writing to a strictlimit wb |
5a537485 MP |
760 | * much earlier than global "freerun" is reached (~23MB vs. ~2.3GB |
761 | * in the example above). | |
6c14ae1e | 762 | */ |
a88a341a | 763 | if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { |
de1fff37 TH |
764 | long long wb_pos_ratio; |
765 | unsigned long wb_bg_thresh; | |
5a537485 | 766 | |
de1fff37 | 767 | if (wb_dirty < 8) |
5a537485 MP |
768 | return min_t(long long, pos_ratio * 2, |
769 | 2 << RATELIMIT_CALC_SHIFT); | |
770 | ||
de1fff37 | 771 | if (wb_dirty >= wb_thresh) |
5a537485 MP |
772 | return 0; |
773 | ||
de1fff37 TH |
774 | wb_bg_thresh = div_u64((u64)wb_thresh * bg_thresh, thresh); |
775 | wb_setpoint = dirty_freerun_ceiling(wb_thresh, wb_bg_thresh); | |
5a537485 | 776 | |
de1fff37 | 777 | if (wb_setpoint == 0 || wb_setpoint == wb_thresh) |
5a537485 MP |
778 | return 0; |
779 | ||
de1fff37 TH |
780 | wb_pos_ratio = pos_ratio_polynom(wb_setpoint, wb_dirty, |
781 | wb_thresh); | |
5a537485 MP |
782 | |
783 | /* | |
de1fff37 TH |
784 | * Typically, for strictlimit case, wb_setpoint << setpoint |
785 | * and pos_ratio >> wb_pos_ratio. In the other words global | |
5a537485 | 786 | * state ("dirty") is not limiting factor and we have to |
de1fff37 | 787 | * make decision based on wb counters. But there is an |
5a537485 MP |
788 | * important case when global pos_ratio should get precedence: |
789 | * global limits are exceeded (e.g. due to activities on other | |
de1fff37 | 790 | * wb's) while given strictlimit wb is below limit. |
5a537485 | 791 | * |
de1fff37 | 792 | * "pos_ratio * wb_pos_ratio" would work for the case above, |
5a537485 | 793 | * but it would look too non-natural for the case of all |
de1fff37 | 794 | * activity in the system coming from a single strictlimit wb |
5a537485 MP |
795 | * with bdi->max_ratio == 100%. |
796 | * | |
797 | * Note that min() below somewhat changes the dynamics of the | |
798 | * control system. Normally, pos_ratio value can be well over 3 | |
de1fff37 | 799 | * (when globally we are at freerun and wb is well below wb |
5a537485 MP |
800 | * setpoint). Now the maximum pos_ratio in the same situation |
801 | * is 2. We might want to tweak this if we observe the control | |
802 | * system is too slow to adapt. | |
803 | */ | |
de1fff37 | 804 | return min(pos_ratio, wb_pos_ratio); |
5a537485 | 805 | } |
6c14ae1e WF |
806 | |
807 | /* | |
808 | * We have computed basic pos_ratio above based on global situation. If | |
de1fff37 | 809 | * the wb is over/under its share of dirty pages, we want to scale |
6c14ae1e WF |
810 | * pos_ratio further down/up. That is done by the following mechanism. |
811 | */ | |
812 | ||
813 | /* | |
de1fff37 | 814 | * wb setpoint |
6c14ae1e | 815 | * |
de1fff37 | 816 | * f(wb_dirty) := 1.0 + k * (wb_dirty - wb_setpoint) |
6c14ae1e | 817 | * |
de1fff37 | 818 | * x_intercept - wb_dirty |
6c14ae1e | 819 | * := -------------------------- |
de1fff37 | 820 | * x_intercept - wb_setpoint |
6c14ae1e | 821 | * |
de1fff37 | 822 | * The main wb control line is a linear function that is subject to
6c14ae1e | 823 | * |
de1fff37 TH |
824 | * (1) f(wb_setpoint) = 1.0 |
825 | * (2) k = - 1 / (8 * write_bw) (in single wb case) | |
826 | * or equally: x_intercept = wb_setpoint + 8 * write_bw | |
6c14ae1e | 827 | * |
de1fff37 | 828 | * For single wb case, the dirty pages are observed to fluctuate |
6c14ae1e | 829 | * regularly within range |
de1fff37 | 830 | * [wb_setpoint - write_bw/2, wb_setpoint + write_bw/2] |
6c14ae1e WF |
831 | * for various filesystems, where (2) can yield in a reasonable 12.5% |
832 | * fluctuation range for pos_ratio. | |
833 | * | |
de1fff37 | 834 | * For JBOD case, wb_thresh (not wb_dirty!) could fluctuate up to its |
6c14ae1e | 835 | * own size, so move the slope over accordingly and choose a slope that |
de1fff37 | 836 | * yields 100% pos_ratio fluctuation on suddenly doubled wb_thresh. |
6c14ae1e | 837 | */ |
de1fff37 TH |
838 | if (unlikely(wb_thresh > thresh)) |
839 | wb_thresh = thresh; | |
aed21ad2 | 840 | /* |
de1fff37 | 841 | * It's very possible that wb_thresh is close to 0 not because the |
aed21ad2 WF |
842 | * device is slow, but that it has remained inactive for long time. |
843 | * Honour such devices a reasonable good (hopefully IO efficient) | |
844 | * threshold, so that the occasional writes won't be blocked and active | |
845 | * writes can rampup the threshold quickly. | |
846 | */ | |
de1fff37 | 847 | wb_thresh = max(wb_thresh, (limit - dirty) / 8); |
6c14ae1e | 848 | /* |
de1fff37 TH |
849 | * scale global setpoint to wb's: |
850 | * wb_setpoint = setpoint * wb_thresh / thresh | |
6c14ae1e | 851 | */ |
de1fff37 TH |
852 | x = div_u64((u64)wb_thresh << 16, thresh + 1); |
853 | wb_setpoint = setpoint * (u64)x >> 16; | |
6c14ae1e | 854 | /* |
de1fff37 TH |
855 | * Use span=(8*write_bw) in single wb case as indicated by |
856 | * (thresh - wb_thresh ~= 0) and transit to wb_thresh in JBOD case. | |
6c14ae1e | 857 | * |
de1fff37 TH |
858 | *
859 | * span = (wb_thresh / thresh) * (8 * write_bw) + ((thresh - wb_thresh) / thresh) * wb_thresh
860 | *
6c14ae1e | 861 | */ |
de1fff37 TH |
862 | span = (thresh - wb_thresh + 8 * write_bw) * (u64)x >> 16; |
863 | x_intercept = wb_setpoint + span; | |
6c14ae1e | 864 | |
de1fff37 TH |
865 | if (wb_dirty < x_intercept - span / 4) { |
866 | pos_ratio = div64_u64(pos_ratio * (x_intercept - wb_dirty), | |
867 | x_intercept - wb_setpoint + 1); | |
6c14ae1e WF |
868 | } else |
869 | pos_ratio /= 4; | |
870 | ||
8927f66c | 871 | /* |
de1fff37 | 872 | * wb reserve area, safeguard against dirty pool underrun and disk idle |
8927f66c WF |
873 | * It may push the desired control point of global dirty pages higher |
874 | * than setpoint. | |
875 | */ | |
de1fff37 TH |
876 | x_intercept = wb_thresh / 2; |
877 | if (wb_dirty < x_intercept) { | |
878 | if (wb_dirty > x_intercept / 8) | |
879 | pos_ratio = div_u64(pos_ratio * x_intercept, wb_dirty); | |
50657fc4 | 880 | else |
8927f66c WF |
881 | pos_ratio *= 8; |
882 | } | |
883 | ||
6c14ae1e WF |
884 | return pos_ratio; |
885 | } | |
886 | ||
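To put the wb control line in numbers (illustrative values only): with wb_setpoint = 4,000 pages and span = 8 * write_bw = 16,000, x_intercept is 20,000; a wb at wb_dirty = 12,000 scales the global pos_ratio by (20,000 - 12,000) / (20,000 - 4,000) = 0.5, while anything at or past x_intercept - span/4 = 16,000 pages simply gets pos_ratio / 4.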
a88a341a TH |
887 | static void wb_update_write_bandwidth(struct bdi_writeback *wb, |
888 | unsigned long elapsed, | |
889 | unsigned long written) | |
e98be2d5 WF |
890 | { |
891 | const unsigned long period = roundup_pow_of_two(3 * HZ); | |
a88a341a TH |
892 | unsigned long avg = wb->avg_write_bandwidth; |
893 | unsigned long old = wb->write_bandwidth; | |
e98be2d5 WF |
894 | u64 bw; |
895 | ||
896 | /* | |
897 | * bw = written * HZ / elapsed | |
898 | * | |
899 | * bw * elapsed + write_bandwidth * (period - elapsed) | |
900 | * write_bandwidth = --------------------------------------------------- | |
901 | * period | |
c72efb65 TH |
902 | * |
903 | * @written may have decreased due to account_page_redirty(). | |
904 | * Avoid underflowing @bw calculation. | |
e98be2d5 | 905 | */ |
a88a341a | 906 | bw = written - min(written, wb->written_stamp); |
e98be2d5 WF |
907 | bw *= HZ; |
908 | if (unlikely(elapsed > period)) { | |
909 | do_div(bw, elapsed); | |
910 | avg = bw; | |
911 | goto out; | |
912 | } | |
a88a341a | 913 | bw += (u64)wb->write_bandwidth * (period - elapsed); |
e98be2d5 WF |
914 | bw >>= ilog2(period); |
915 | ||
916 | /* | |
917 | * one more level of smoothing, for filtering out sudden spikes | |
918 | */ | |
919 | if (avg > old && old >= (unsigned long)bw) | |
920 | avg -= (avg - old) >> 3; | |
921 | ||
922 | if (avg < old && old <= (unsigned long)bw) | |
923 | avg += (old - avg) >> 3; | |
924 | ||
925 | out: | |
95a46c65 TH |
926 | /* keep avg > 0 to guarantee that tot > 0 if there are dirty wbs */ |
927 | avg = max(avg, 1LU); | |
928 | if (wb_has_dirty_io(wb)) { | |
929 | long delta = avg - wb->avg_write_bandwidth; | |
930 | WARN_ON_ONCE(atomic_long_add_return(delta, | |
931 | &wb->bdi->tot_write_bandwidth) <= 0); | |
932 | } | |
a88a341a TH |
933 | wb->write_bandwidth = bw; |
934 | wb->avg_write_bandwidth = avg; | |
e98be2d5 WF |
935 | } |
936 | ||
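The period-weighted averaging above can be tried out in userspace. The sketch below mirrors the weighting from the comment (fresh sample weighted by the elapsed time, old estimate by the rest of a ~3 s period, then a second light smoothing pass); the helper name, HZ value, and sample numbers are all invented for illustration:

```c
#include <stdio.h>

#define HZ	1000			/* pretend one jiffy is 1 ms */
#define PERIOD	4096			/* roundup_pow_of_two(3 * HZ) */

struct bw_est {
	unsigned long write_bandwidth;		/* period-weighted estimate */
	unsigned long avg_write_bandwidth;	/* extra-smoothed estimate */
};

/* Fold one sample of `written` pages over `elapsed` jiffies into the estimate. */
static void bw_sample(struct bw_est *e, unsigned long written, unsigned long elapsed)
{
	unsigned long old = e->write_bandwidth;
	unsigned long avg = e->avg_write_bandwidth;
	unsigned long long bw = (unsigned long long)written * HZ / elapsed;

	/* bw * elapsed + old * (period - elapsed), all over period */
	bw = (bw * elapsed + (unsigned long long)old * (PERIOD - elapsed)) / PERIOD;

	/* only drift avg when the new value confirms the direction */
	if (avg > old && old >= (unsigned long)bw)
		avg -= (avg - old) >> 3;
	if (avg < old && old <= (unsigned long)bw)
		avg += (old - avg) >> 3;

	e->write_bandwidth = (unsigned long)bw;
	e->avg_write_bandwidth = avg;
}

int main(void)
{
	struct bw_est e = { 25600, 30000 };	/* ~100 MB/s in 4 KiB pages/s */

	bw_sample(&e, 2560, 200);		/* one slower 200 ms sample */
	printf("bw=%lu avg=%lu pages/s\n",	/* bw=24975 avg=29450 */
	       e.write_bandwidth, e.avg_write_bandwidth);
	return 0;
}
```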
c42843f2 WF |
937 | /* |
938 | * The global dirtyable memory and dirty threshold could be suddenly knocked | |
939 | * down by a large amount (eg. on the startup of KVM in a swapless system). | |
940 | * This may throw the system into deep dirty exceeded state and throttle | |
941 | * heavy/light dirtiers alike. To retain good responsiveness, maintain | |
942 | * global_dirty_limit for tracking slowly down to the knocked down dirty | |
943 | * threshold. | |
944 | */ | |
945 | static void update_dirty_limit(unsigned long thresh, unsigned long dirty) | |
946 | { | |
947 | unsigned long limit = global_dirty_limit; | |
948 | ||
949 | /* | |
950 | * Follow up in one step. | |
951 | */ | |
952 | if (limit < thresh) { | |
953 | limit = thresh; | |
954 | goto update; | |
955 | } | |
956 | ||
957 | /* | |
958 | * Follow down slowly. Use the higher one as the target, because thresh | |
959 | * may drop below dirty. This is exactly the reason to introduce | |
960 | * global_dirty_limit which is guaranteed to lie above the dirty pages. | |
961 | */ | |
962 | thresh = max(thresh, dirty); | |
963 | if (limit > thresh) { | |
964 | limit -= (limit - thresh) >> 5; | |
965 | goto update; | |
966 | } | |
967 | return; | |
968 | update: | |
969 | global_dirty_limit = limit; | |
970 | } | |
971 | ||
972 | static void global_update_bandwidth(unsigned long thresh, | |
973 | unsigned long dirty, | |
974 | unsigned long now) | |
975 | { | |
976 | static DEFINE_SPINLOCK(dirty_lock); | |
7d70e154 | 977 | static unsigned long update_time = INITIAL_JIFFIES; |
c42843f2 WF |
978 | |
979 | /* | |
980 | * check locklessly first to optimize away locking for the most time | |
981 | */ | |
982 | if (time_before(now, update_time + BANDWIDTH_INTERVAL)) | |
983 | return; | |
984 | ||
985 | spin_lock(&dirty_lock); | |
986 | if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) { | |
987 | update_dirty_limit(thresh, dirty); | |
988 | update_time = now; | |
989 | } | |
990 | spin_unlock(&dirty_lock); | |
991 | } | |
992 | ||
be3ffa27 | 993 | /* |
de1fff37 | 994 | * Maintain wb->dirty_ratelimit, the base dirty throttle rate. |
be3ffa27 | 995 | * |
de1fff37 | 996 | * Normal wb tasks will be curbed at or below it in long term. |
be3ffa27 WF |
997 | * Obviously it should be around (write_bw / N) when there are N dd tasks. |
998 | */ | |
a88a341a TH |
999 | static void wb_update_dirty_ratelimit(struct bdi_writeback *wb, |
1000 | unsigned long thresh, | |
1001 | unsigned long bg_thresh, | |
1002 | unsigned long dirty, | |
de1fff37 TH |
1003 | unsigned long wb_thresh, |
1004 | unsigned long wb_dirty, | |
a88a341a TH |
1005 | unsigned long dirtied, |
1006 | unsigned long elapsed) | |
be3ffa27 | 1007 | { |
7381131c WF |
1008 | unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); |
1009 | unsigned long limit = hard_dirty_limit(thresh); | |
1010 | unsigned long setpoint = (freerun + limit) / 2; | |
a88a341a TH |
1011 | unsigned long write_bw = wb->avg_write_bandwidth; |
1012 | unsigned long dirty_ratelimit = wb->dirty_ratelimit; | |
be3ffa27 WF |
1013 | unsigned long dirty_rate; |
1014 | unsigned long task_ratelimit; | |
1015 | unsigned long balanced_dirty_ratelimit; | |
1016 | unsigned long pos_ratio; | |
7381131c WF |
1017 | unsigned long step; |
1018 | unsigned long x; | |
be3ffa27 WF |
1019 | |
1020 | /* | |
1021 | * The dirty rate will match the writeout rate in long term, except | |
1022 | * when dirty pages are truncated by userspace or re-dirtied by FS. | |
1023 | */ | |
a88a341a | 1024 | dirty_rate = (dirtied - wb->dirtied_stamp) * HZ / elapsed; |
be3ffa27 | 1025 | |
a88a341a | 1026 | pos_ratio = wb_position_ratio(wb, thresh, bg_thresh, dirty, |
de1fff37 | 1027 | wb_thresh, wb_dirty); |
be3ffa27 WF |
1028 | /* |
1029 | * task_ratelimit reflects each dd's dirty rate for the past 200ms. | |
1030 | */ | |
1031 | task_ratelimit = (u64)dirty_ratelimit * | |
1032 | pos_ratio >> RATELIMIT_CALC_SHIFT; | |
1033 | task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */ | |
1034 | ||
1035 | /* | |
1036 | * A linear estimation of the "balanced" throttle rate. The theory is, | |
de1fff37 | 1037 | * if there are N dd tasks, each throttled at task_ratelimit, the wb's |
be3ffa27 WF |
1038 | * dirty_rate will be measured to be (N * task_ratelimit). So the below |
1039 | * formula will yield the balanced rate limit (write_bw / N). | |
1040 | * | |
1041 | * Note that the expanded form is not a pure rate feedback: | |
1042 | * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1) | |
1043 | * but also takes pos_ratio into account: | |
1044 | * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2) | |
1045 | * | |
1046 | * (1) is not realistic because pos_ratio also takes part in balancing | |
1047 | * the dirty rate. Consider the state | |
1048 | * pos_ratio = 0.5 (3) | |
1049 | * rate = 2 * (write_bw / N) (4) | |
1050 | * If (1) is used, it will get stuck in that state! Because each dd will
1051 | * be throttled at | |
1052 | * task_ratelimit = pos_ratio * rate = (write_bw / N) (5) | |
1053 | * yielding | |
1054 | * dirty_rate = N * task_ratelimit = write_bw (6) | |
1055 | * put (6) into (1) we get | |
1056 | * rate_(i+1) = rate_(i) (7) | |
1057 | * | |
1058 | * So we end up using (2) to always keep | |
1059 | * rate_(i+1) ~= (write_bw / N) (8) | |
1060 | * regardless of the value of pos_ratio. As long as (8) is satisfied, | |
1061 | * pos_ratio is able to drive itself to 1.0, which is not only where | |
1062 | * the dirty count meet the setpoint, but also where the slope of | |
1063 | * pos_ratio is most flat and hence task_ratelimit is least fluctuated. | |
1064 | */ | |
1065 | balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, | |
1066 | dirty_rate | 1); | |
bdaac490 WF |
1067 | /* |
1068 | * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw | |
1069 | */ | |
1070 | if (unlikely(balanced_dirty_ratelimit > write_bw)) | |
1071 | balanced_dirty_ratelimit = write_bw; | |
be3ffa27 | 1072 | |
7381131c WF |
1073 | /* |
1074 | * We could safely do this and return immediately: | |
1075 | * | |
de1fff37 | 1076 | * wb->dirty_ratelimit = balanced_dirty_ratelimit; |
7381131c WF |
1077 | * |
1078 | * However to get a more stable dirty_ratelimit, the below elaborated | |
331cbdee | 1079 | * code makes use of task_ratelimit to filter out singular points and |
7381131c WF |
1080 | * limit the step size. |
1081 | * | |
1082 | * The below code essentially only uses the relative value of | |
1083 | * | |
1084 | * task_ratelimit - dirty_ratelimit | |
1085 | * = (pos_ratio - 1) * dirty_ratelimit | |
1086 | * | |
1087 | * which reflects the direction and size of dirty position error. | |
1088 | */ | |
1089 | ||
1090 | /* | |
1091 | * dirty_ratelimit will follow balanced_dirty_ratelimit iff | |
1092 | * task_ratelimit is on the same side of dirty_ratelimit, too. | |
1093 | * For example, when | |
1094 | * - dirty_ratelimit > balanced_dirty_ratelimit | |
1095 | * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) | |
1096 | * lowering dirty_ratelimit will help meet both the position and rate | |
1097 | * control targets. Otherwise, don't update dirty_ratelimit if it will | |
1098 | * only help meet the rate target. After all, what the users ultimately | |
1099 | * feel and care are stable dirty rate and small position error. | |
1100 | * | |
1101 | * |task_ratelimit - dirty_ratelimit| is used to limit the step size | |
331cbdee | 1102 | * and filter out the singular points of balanced_dirty_ratelimit. Which |
7381131c WF |
1103 | * keeps jumping around randomly and can even leap far away at times |
1104 | * due to the small 200ms estimation period of dirty_rate (we want to | |
1105 | * keep that period small to reduce time lags). | |
1106 | */ | |
1107 | step = 0; | |
5a537485 MP |
1108 | |
1109 | /* | |
de1fff37 | 1110 | * For strictlimit case, calculations above were based on wb counters |
a88a341a | 1111 | * and limits (starting from pos_ratio = wb_position_ratio() and up to |
5a537485 | 1112 | * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate). |
de1fff37 TH |
1113 | * Hence, to calculate "step" properly, we have to use wb_dirty as |
1114 | * "dirty" and wb_setpoint as "setpoint". | |
5a537485 | 1115 | * |
de1fff37 TH |
1116 | * We rampup dirty_ratelimit forcibly if wb_dirty is low because |
1117 | * it's possible that wb_thresh is close to zero due to inactivity | |
a88a341a | 1118 | * of backing device (see the implementation of wb_dirty_limit()). |
5a537485 | 1119 | */ |
a88a341a | 1120 | if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { |
de1fff37 TH |
1121 | dirty = wb_dirty; |
1122 | if (wb_dirty < 8) | |
1123 | setpoint = wb_dirty + 1; | |
5a537485 | 1124 | else |
de1fff37 | 1125 | setpoint = (wb_thresh + |
a88a341a | 1126 | wb_dirty_limit(wb, bg_thresh)) / 2; |
5a537485 MP |
1127 | } |
1128 | ||
7381131c | 1129 | if (dirty < setpoint) { |
a88a341a | 1130 | x = min3(wb->balanced_dirty_ratelimit, |
7c809968 | 1131 | balanced_dirty_ratelimit, task_ratelimit); |
7381131c WF |
1132 | if (dirty_ratelimit < x) |
1133 | step = x - dirty_ratelimit; | |
1134 | } else { | |
a88a341a | 1135 | x = max3(wb->balanced_dirty_ratelimit, |
7c809968 | 1136 | balanced_dirty_ratelimit, task_ratelimit); |
7381131c WF |
1137 | if (dirty_ratelimit > x) |
1138 | step = dirty_ratelimit - x; | |
1139 | } | |
1140 | ||
1141 | /* | |
1142 | * Don't pursue 100% rate matching. It's impossible since the balanced | |
1143 | * rate itself is constantly fluctuating. So decrease the track speed | |
1144 | * when it gets close to the target. Helps eliminate pointless tremors. | |
1145 | */ | |
1146 | step >>= dirty_ratelimit / (2 * step + 1); | |
1147 | /* | |
1148 | * Limit the tracking speed to avoid overshooting. | |
1149 | */ | |
1150 | step = (step + 7) / 8; | |
1151 | ||
1152 | if (dirty_ratelimit < balanced_dirty_ratelimit) | |
1153 | dirty_ratelimit += step; | |
1154 | else | |
1155 | dirty_ratelimit -= step; | |
1156 | ||
a88a341a TH |
1157 | wb->dirty_ratelimit = max(dirty_ratelimit, 1UL); |
1158 | wb->balanced_dirty_ratelimit = balanced_dirty_ratelimit; | |
b48c104d | 1159 | |
a88a341a | 1160 | trace_bdi_dirty_ratelimit(wb->bdi, dirty_rate, task_ratelimit); |
be3ffa27 WF |
1161 | } |
1162 | ||
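A small worked example of the balancing above (numbers invented): say write_bw is 20,000 pages/s, dirty_ratelimit is currently 10,000 pages/s, and pos_ratio is 1/2 because the dirty count sits above the setpoint, so task_ratelimit comes out at 5,000. With four dd tasks throttled at that rate, dirty_rate is measured at about 20,000 pages/s and balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate = 5,000 * 20,000 / 20,000 = 5,000 pages/s, i.e. write_bw / N as promised. Since dirty_ratelimit (10,000) is above both task_ratelimit and the balanced value, the step logic walks it down toward 5,000 a limited amount per 200 ms update.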
a88a341a TH |
1163 | void __wb_update_bandwidth(struct bdi_writeback *wb, |
1164 | unsigned long thresh, | |
1165 | unsigned long bg_thresh, | |
1166 | unsigned long dirty, | |
de1fff37 TH |
1167 | unsigned long wb_thresh, |
1168 | unsigned long wb_dirty, | |
a88a341a | 1169 | unsigned long start_time) |
e98be2d5 WF |
1170 | { |
1171 | unsigned long now = jiffies; | |
a88a341a | 1172 | unsigned long elapsed = now - wb->bw_time_stamp; |
be3ffa27 | 1173 | unsigned long dirtied; |
e98be2d5 WF |
1174 | unsigned long written; |
1175 | ||
1176 | /* | |
1177 | * rate-limit, only update once every 200ms. | |
1178 | */ | |
1179 | if (elapsed < BANDWIDTH_INTERVAL) | |
1180 | return; | |
1181 | ||
a88a341a TH |
1182 | dirtied = percpu_counter_read(&wb->stat[WB_DIRTIED]); |
1183 | written = percpu_counter_read(&wb->stat[WB_WRITTEN]); | |
e98be2d5 WF |
1184 | |
1185 | /* | |
1186 | * Skip quiet periods when disk bandwidth is under-utilized. | |
1187 | * (at least 1s idle time between two flusher runs) | |
1188 | */ | |
a88a341a | 1189 | if (elapsed > HZ && time_before(wb->bw_time_stamp, start_time)) |
e98be2d5 WF |
1190 | goto snapshot; |
1191 | ||
be3ffa27 | 1192 | if (thresh) { |
c42843f2 | 1193 | global_update_bandwidth(thresh, dirty, now); |
a88a341a | 1194 | wb_update_dirty_ratelimit(wb, thresh, bg_thresh, dirty, |
de1fff37 | 1195 | wb_thresh, wb_dirty, |
a88a341a | 1196 | dirtied, elapsed); |
be3ffa27 | 1197 | } |
a88a341a | 1198 | wb_update_write_bandwidth(wb, elapsed, written); |
e98be2d5 WF |
1199 | |
1200 | snapshot: | |
a88a341a TH |
1201 | wb->dirtied_stamp = dirtied; |
1202 | wb->written_stamp = written; | |
1203 | wb->bw_time_stamp = now; | |
e98be2d5 WF |
1204 | } |
1205 | ||
a88a341a TH |
1206 | static void wb_update_bandwidth(struct bdi_writeback *wb, |
1207 | unsigned long thresh, | |
1208 | unsigned long bg_thresh, | |
1209 | unsigned long dirty, | |
de1fff37 TH |
1210 | unsigned long wb_thresh, |
1211 | unsigned long wb_dirty, | |
a88a341a | 1212 | unsigned long start_time) |
e98be2d5 | 1213 | { |
a88a341a | 1214 | if (time_is_after_eq_jiffies(wb->bw_time_stamp + BANDWIDTH_INTERVAL)) |
e98be2d5 | 1215 | return; |
a88a341a TH |
1216 | spin_lock(&wb->list_lock); |
1217 | __wb_update_bandwidth(wb, thresh, bg_thresh, dirty, | |
de1fff37 | 1218 | wb_thresh, wb_dirty, start_time); |
a88a341a | 1219 | spin_unlock(&wb->list_lock); |
e98be2d5 WF |
1220 | } |
1221 | ||
9d823e8f | 1222 | /* |
d0e1d66b | 1223 | * After a task dirtied this many pages, balance_dirty_pages_ratelimited() |
9d823e8f WF |
1224 | * will look to see if it needs to start dirty throttling. |
1225 | * | |
1226 | * If dirty_poll_interval is too low, big NUMA machines will call the expensive | |
1227 | * global_page_state() too often. So scale it near-sqrt to the safety margin | |
1228 | * (the number of pages we may dirty without exceeding the dirty limits). | |
1229 | */ | |
1230 | static unsigned long dirty_poll_interval(unsigned long dirty, | |
1231 | unsigned long thresh) | |
1232 | { | |
1233 | if (thresh > dirty) | |
1234 | return 1UL << (ilog2(thresh - dirty) >> 1); | |
1235 | ||
1236 | return 1; | |
1237 | } | |
1238 | ||
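For example, with 409,600 pages of headroom below the limit (about 1.6 GiB of 4 KiB pages), ilog2(409600) is 18 and a task may dirty 1 << 9 = 512 pages between polls, roughly the square root of the margin; as the margin shrinks the interval collapses toward a single page.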
a88a341a | 1239 | static unsigned long wb_max_pause(struct bdi_writeback *wb, |
de1fff37 | 1240 | unsigned long wb_dirty) |
c8462cc9 | 1241 | { |
a88a341a | 1242 | unsigned long bw = wb->avg_write_bandwidth; |
e3b6c655 | 1243 | unsigned long t; |
c8462cc9 | 1244 | |
7ccb9ad5 WF |
1245 | /* |
1246 | * Limit pause time for small memory systems. If sleeping for too long | |
1247 | * time, a small pool of dirty/writeback pages may go empty and disk go | |
1248 | * idle. | |
1249 | * | |
1250 | * 8 serves as the safety ratio. | |
1251 | */ | |
de1fff37 | 1252 | t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); |
7ccb9ad5 WF |
1253 | t++; |
1254 | ||
e3b6c655 | 1255 | return min_t(unsigned long, t, MAX_PAUSE); |
7ccb9ad5 WF |
1256 | } |
1257 | ||
a88a341a TH |
1258 | static long wb_min_pause(struct bdi_writeback *wb, |
1259 | long max_pause, | |
1260 | unsigned long task_ratelimit, | |
1261 | unsigned long dirty_ratelimit, | |
1262 | int *nr_dirtied_pause) | |
c8462cc9 | 1263 | { |
a88a341a TH |
1264 | long hi = ilog2(wb->avg_write_bandwidth); |
1265 | long lo = ilog2(wb->dirty_ratelimit); | |
7ccb9ad5 WF |
1266 | long t; /* target pause */ |
1267 | long pause; /* estimated next pause */ | |
1268 | int pages; /* target nr_dirtied_pause */ | |
c8462cc9 | 1269 | |
7ccb9ad5 WF |
1270 | /* target for 10ms pause on 1-dd case */ |
1271 | t = max(1, HZ / 100); | |
c8462cc9 WF |
1272 | |
1273 | /* | |
1274 | * Scale up pause time for concurrent dirtiers in order to reduce CPU | |
1275 | * overheads. | |
1276 | * | |
7ccb9ad5 | 1277 | * (N * 10ms) on 2^N concurrent tasks. |
c8462cc9 WF |
1278 | */ |
1279 | if (hi > lo) | |
7ccb9ad5 | 1280 | t += (hi - lo) * (10 * HZ) / 1024; |
c8462cc9 WF |
1281 | |
1282 | /* | |
7ccb9ad5 WF |
1283 | * This is a bit convoluted. We try to base the next nr_dirtied_pause |
1284 | * on the much more stable dirty_ratelimit. However the next pause time | |
1285 | * will be computed based on task_ratelimit and the two rate limits may | |
1286 | * depart considerably at some time. Especially if task_ratelimit goes | |
1287 | * below dirty_ratelimit/2 and the target pause is max_pause, the next | |
1288 | * pause time will be max_pause*2 _trimmed down_ to max_pause. As a | |
1289 | * result task_ratelimit won't be executed faithfully, which could | |
1290 | * eventually bring down dirty_ratelimit. | |
c8462cc9 | 1291 | * |
7ccb9ad5 WF |
1292 | * We apply two rules to fix it up: |
1293 | * 1) try to estimate the next pause time and if necessary, use a lower | |
1294 | * nr_dirtied_pause so as not to exceed max_pause. When this happens, | |
1295 | * nr_dirtied_pause will be "dancing" with task_ratelimit. | |
1296 | * 2) limit the target pause time to max_pause/2, so that the normal | |
1297 | * small fluctuations of task_ratelimit won't trigger rule (1) and | |
1298 | * nr_dirtied_pause will remain as stable as dirty_ratelimit. | |
c8462cc9 | 1299 | */ |
7ccb9ad5 WF |
1300 | t = min(t, 1 + max_pause / 2); |
1301 | pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); | |
c8462cc9 WF |
1302 | |
1303 | /* | |
5b9b3574 WF |
1304 | * Tiny nr_dirtied_pause is found to hurt I/O performance in the test |
1305 | * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}. | |
1306 | * When the 16 consecutive reads are often interrupted by some dirty | |
1307 | * throttling pause during the async writes, cfq will go into idles | |
1308 | * (deadline is fine). So push nr_dirtied_pause as high as possible | |
1309 | * until reaches DIRTY_POLL_THRESH=32 pages. | |
c8462cc9 | 1310 | */ |
5b9b3574 WF |
1311 | if (pages < DIRTY_POLL_THRESH) { |
1312 | t = max_pause; | |
1313 | pages = dirty_ratelimit * t / roundup_pow_of_two(HZ); | |
1314 | if (pages > DIRTY_POLL_THRESH) { | |
1315 | pages = DIRTY_POLL_THRESH; | |
1316 | t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit; | |
1317 | } | |
1318 | } | |
1319 | ||
7ccb9ad5 WF |
1320 | pause = HZ * pages / (task_ratelimit + 1); |
1321 | if (pause > max_pause) { | |
1322 | t = max_pause; | |
1323 | pages = task_ratelimit * t / roundup_pow_of_two(HZ); | |
1324 | } | |
c8462cc9 | 1325 | |
7ccb9ad5 | 1326 | *nr_dirtied_pause = pages; |
c8462cc9 | 1327 | /* |
7ccb9ad5 | 1328 | * The minimal pause time will normally be half the target pause time. |
c8462cc9 | 1329 | */ |
5b9b3574 | 1330 | return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t; |
c8462cc9 WF |
1331 | } |
1332 | ||
a88a341a TH |
1333 | static inline void wb_dirty_limits(struct bdi_writeback *wb, |
1334 | unsigned long dirty_thresh, | |
1335 | unsigned long background_thresh, | |
de1fff37 TH |
1336 | unsigned long *wb_dirty, |
1337 | unsigned long *wb_thresh, | |
1338 | unsigned long *wb_bg_thresh) | |
5a537485 | 1339 | { |
93f78d88 | 1340 | unsigned long wb_reclaimable; |
5a537485 MP |
1341 | |
1342 | /* | |
de1fff37 | 1343 | * wb_thresh is not treated as some limiting factor as |
5a537485 | 1344 | * dirty_thresh, due to reasons |
de1fff37 | 1345 | * - in JBOD setup, wb_thresh can fluctuate a lot |
5a537485 | 1346 | * - in a system with HDD and USB key, the USB key may somehow |
de1fff37 TH |
1347 | * go into state (wb_dirty >> wb_thresh) either because |
1348 | * wb_dirty starts high, or because wb_thresh drops low. | |
5a537485 | 1349 | * In this case we don't want to hard throttle the USB key |
de1fff37 TH |
1350 | * dirtiers for 100 seconds until wb_dirty drops under |
1351 | * wb_thresh. Instead the auxiliary wb control line in | |
a88a341a | 1352 | * wb_position_ratio() will let the dirtier task progress |
de1fff37 | 1353 | * at some rate <= (write_bw / 2) for bringing down wb_dirty. |
5a537485 | 1354 | */ |
de1fff37 | 1355 | *wb_thresh = wb_dirty_limit(wb, dirty_thresh); |
5a537485 | 1356 | |
de1fff37 TH |
1357 | if (wb_bg_thresh) |
1358 | *wb_bg_thresh = dirty_thresh ? div_u64((u64)*wb_thresh * | |
1359 | background_thresh, | |
1360 | dirty_thresh) : 0; | |
5a537485 MP |
1361 | |
1362 | /* | |
1363 | * In order to avoid the stacked BDI deadlock we need | |
1364 | * to ensure we accurately count the 'dirty' pages when | |
1365 | * the threshold is low. | |
1366 | * | |
1367 | * Otherwise it would be possible to get thresh+n pages | |
1368 | * reported dirty, even though there are thresh-m pages | |
1369 | * actually dirty; with m+n sitting in the percpu | |
1370 | * deltas. | |
1371 | */ | |
de1fff37 | 1372 | if (*wb_thresh < 2 * wb_stat_error(wb)) { |
93f78d88 | 1373 | wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); |
de1fff37 | 1374 | *wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); |
5a537485 | 1375 | } else { |
93f78d88 | 1376 | wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); |
de1fff37 | 1377 | *wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); |
5a537485 MP |
1378 | } |
1379 | } | |
1380 | ||
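/*
 * Example of the wb_bg_thresh scaling above (illustrative numbers only):
 * with dirty_thresh = 1000 pages, background_thresh = 500 pages and
 * wb_thresh = 200 pages, the per-wb background threshold becomes
 * 200 * 500 / 1000 = 100 pages, i.e. the global background:dirty ratio
 * is preserved within this wb's share of the limits.
 */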
1da177e4 LT |
1381 | /* |
1382 | * balance_dirty_pages() must be called by processes which are generating dirty | |
1383 | * data. It looks at the number of dirty pages in the machine and will force | |
143dfe86 | 1384 | * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2. |
5b0830cb JA |
1385 | * If we're over `background_thresh' then the writeback threads are woken to |
1386 | * perform some writeout. | |
1da177e4 | 1387 | */ |
3a2e9a5a | 1388 | static void balance_dirty_pages(struct address_space *mapping, |
dfb8ae56 | 1389 | struct bdi_writeback *wb, |
143dfe86 | 1390 | unsigned long pages_dirtied) |
1da177e4 | 1391 | { |
143dfe86 | 1392 | unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */ |
7762741e | 1393 | unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ |
364aeb28 DR |
1394 | unsigned long background_thresh; |
1395 | unsigned long dirty_thresh; | |
83712358 | 1396 | long period; |
7ccb9ad5 WF |
1397 | long pause; |
1398 | long max_pause; | |
1399 | long min_pause; | |
1400 | int nr_dirtied_pause; | |
e50e3720 | 1401 | bool dirty_exceeded = false; |
143dfe86 | 1402 | unsigned long task_ratelimit; |
7ccb9ad5 | 1403 | unsigned long dirty_ratelimit; |
143dfe86 | 1404 | unsigned long pos_ratio; |
dfb8ae56 | 1405 | struct backing_dev_info *bdi = wb->bdi; |
5a537485 | 1406 | bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; |
e98be2d5 | 1407 | unsigned long start_time = jiffies; |
1da177e4 LT |
1408 | |
1409 | for (;;) { | |
83712358 | 1410 | unsigned long now = jiffies; |
de1fff37 | 1411 | unsigned long uninitialized_var(wb_thresh); |
5a537485 | 1412 | unsigned long thresh; |
de1fff37 | 1413 | unsigned long uninitialized_var(wb_dirty); |
5a537485 MP |
1414 | unsigned long dirty; |
1415 | unsigned long bg_thresh; | |
83712358 | 1416 | |
143dfe86 WF |
1417 | /* |
1418 | * Unstable writes are a feature of certain networked | |
1419 | * filesystems (e.g. NFS) in which data may have been | |
1420 | * written to the server's write cache, but has not yet | |
1421 | * been flushed to permanent storage. | |
1422 | */ | |
5fce25a9 PZ |
1423 | nr_reclaimable = global_page_state(NR_FILE_DIRTY) + |
1424 | global_page_state(NR_UNSTABLE_NFS); | |
7762741e | 1425 | nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); |
5fce25a9 | 1426 | |
16c4042f WF |
1427 | global_dirty_limits(&background_thresh, &dirty_thresh); |
1428 | ||
5a537485 | 1429 | if (unlikely(strictlimit)) { |
a88a341a | 1430 | wb_dirty_limits(wb, dirty_thresh, background_thresh, |
de1fff37 | 1431 | &wb_dirty, &wb_thresh, &bg_thresh); |
5a537485 | 1432 | |
de1fff37 TH |
1433 | dirty = wb_dirty; |
1434 | thresh = wb_thresh; | |
5a537485 MP |
1435 | } else { |
1436 | dirty = nr_dirty; | |
1437 | thresh = dirty_thresh; | |
1438 | bg_thresh = background_thresh; | |
1439 | } | |
1440 | ||
16c4042f WF |
1441 | /* |
1442 | * Throttle it only when the background writeback cannot | |
1443 | * catch-up. This avoids (excessively) small writeouts | |
de1fff37 | 1444 | * when the wb limits are ramping up in case of !strictlimit. |
5a537485 | 1445 | * |
de1fff37 TH |
1446 | * In strictlimit case make decision based on the wb counters |
1447 | * and limits. Small writeouts when the wb limits are ramping | |
5a537485 | 1448 | * up are the price we consciously pay for strictlimit-ing. |
16c4042f | 1449 | */ |
5a537485 | 1450 | if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh)) { |
83712358 WF |
1451 | current->dirty_paused_when = now; |
1452 | current->nr_dirtied = 0; | |
7ccb9ad5 | 1453 | current->nr_dirtied_pause = |
5a537485 | 1454 | dirty_poll_interval(dirty, thresh); |
16c4042f | 1455 | break; |
83712358 | 1456 | } |
16c4042f | 1457 | |
143dfe86 WF |
1458 | if (unlikely(!writeback_in_progress(bdi))) |
1459 | bdi_start_background_writeback(bdi); | |
1460 | ||
5a537485 | 1461 | if (!strictlimit) |
a88a341a | 1462 | wb_dirty_limits(wb, dirty_thresh, background_thresh, |
de1fff37 | 1463 | &wb_dirty, &wb_thresh, NULL); |
5fce25a9 | 1464 | |
de1fff37 | 1465 | dirty_exceeded = (wb_dirty > wb_thresh) && |
5a537485 | 1466 | ((nr_dirty > dirty_thresh) || strictlimit); |
a88a341a TH |
1467 | if (dirty_exceeded && !wb->dirty_exceeded) |
1468 | wb->dirty_exceeded = 1; | |
1da177e4 | 1469 | |
a88a341a | 1470 | wb_update_bandwidth(wb, dirty_thresh, background_thresh, |
de1fff37 | 1471 | nr_dirty, wb_thresh, wb_dirty, start_time); |
e98be2d5 | 1472 | |
a88a341a TH |
1473 | dirty_ratelimit = wb->dirty_ratelimit; |
1474 | pos_ratio = wb_position_ratio(wb, dirty_thresh, | |
1475 | background_thresh, nr_dirty, | |
de1fff37 | 1476 | wb_thresh, wb_dirty); |
3a73dbbc WF |
1477 | task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >> |
1478 | RATELIMIT_CALC_SHIFT; | |
de1fff37 | 1479 | max_pause = wb_max_pause(wb, wb_dirty); |
a88a341a TH |
1480 | min_pause = wb_min_pause(wb, max_pause, |
1481 | task_ratelimit, dirty_ratelimit, | |
1482 | &nr_dirtied_pause); | |
7ccb9ad5 | 1483 | |
3a73dbbc | 1484 | if (unlikely(task_ratelimit == 0)) { |
83712358 | 1485 | period = max_pause; |
c8462cc9 | 1486 | pause = max_pause; |
143dfe86 | 1487 | goto pause; |
04fbfdc1 | 1488 | } |
83712358 WF |
1489 | period = HZ * pages_dirtied / task_ratelimit; |
1490 | pause = period; | |
1491 | if (current->dirty_paused_when) | |
1492 | pause -= now - current->dirty_paused_when; | |
1493 | /* | |
1494 | * For less than 1s think time (ext3/4 may block the dirtier | |
1495 | * for up to 800ms from time to time on 1-HDD; so does xfs, | |
1496 | * though much less frequently), try to compensate for it in | |
1497 | * future periods by updating the virtual time; otherwise just | |
1498 | * do a reset, as it may be a light dirtier. | |
1499 | */ | |
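/*
 * For instance (illustrative numbers only): with HZ=1000,
 * task_ratelimit = 800 pages/s and pages_dirtied = 32, the period is
 * 1000 * 32 / 800 = 40 jiffies. If the task last paused 25 jiffies ago,
 * those 25 jiffies count as think time and only pause = 40 - 25 = 15
 * jiffies of sleep are still owed.
 */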
7ccb9ad5 | 1500 | if (pause < min_pause) { |
ece13ac3 WF |
1501 | trace_balance_dirty_pages(bdi, |
1502 | dirty_thresh, | |
1503 | background_thresh, | |
1504 | nr_dirty, | |
de1fff37 TH |
1505 | wb_thresh, |
1506 | wb_dirty, | |
ece13ac3 WF |
1507 | dirty_ratelimit, |
1508 | task_ratelimit, | |
1509 | pages_dirtied, | |
83712358 | 1510 | period, |
7ccb9ad5 | 1511 | min(pause, 0L), |
ece13ac3 | 1512 | start_time); |
83712358 WF |
1513 | if (pause < -HZ) { |
1514 | current->dirty_paused_when = now; | |
1515 | current->nr_dirtied = 0; | |
1516 | } else if (period) { | |
1517 | current->dirty_paused_when += period; | |
1518 | current->nr_dirtied = 0; | |
7ccb9ad5 WF |
1519 | } else if (current->nr_dirtied_pause <= pages_dirtied) |
1520 | current->nr_dirtied_pause += pages_dirtied; | |
57fc978c | 1521 | break; |
04fbfdc1 | 1522 | } |
7ccb9ad5 WF |
1523 | if (unlikely(pause > max_pause)) { |
1524 | /* for occasional dropped task_ratelimit */ | |
1525 | now += min(pause - max_pause, max_pause); | |
1526 | pause = max_pause; | |
1527 | } | |
143dfe86 WF |
1528 | |
1529 | pause: | |
ece13ac3 WF |
1530 | trace_balance_dirty_pages(bdi, |
1531 | dirty_thresh, | |
1532 | background_thresh, | |
1533 | nr_dirty, | |
de1fff37 TH |
1534 | wb_thresh, |
1535 | wb_dirty, | |
ece13ac3 WF |
1536 | dirty_ratelimit, |
1537 | task_ratelimit, | |
1538 | pages_dirtied, | |
83712358 | 1539 | period, |
ece13ac3 WF |
1540 | pause, |
1541 | start_time); | |
499d05ec | 1542 | __set_current_state(TASK_KILLABLE); |
d25105e8 | 1543 | io_schedule_timeout(pause); |
87c6a9b2 | 1544 | |
83712358 WF |
1545 | current->dirty_paused_when = now + pause; |
1546 | current->nr_dirtied = 0; | |
7ccb9ad5 | 1547 | current->nr_dirtied_pause = nr_dirtied_pause; |
83712358 | 1548 | |
ffd1f609 | 1549 | /* |
1df64719 WF |
1550 | * This is typically equal to (nr_dirty < dirty_thresh) and can |
1551 | * also keep "1000+ dd on a slow USB stick" under control. | |
ffd1f609 | 1552 | */ |
1df64719 | 1553 | if (task_ratelimit) |
ffd1f609 | 1554 | break; |
499d05ec | 1555 | |
c5c6343c WF |
1556 | /* |
1557 | * In the case of an unresponsive NFS server whose dirty | |
de1fff37 | 1558 | * pages exceed dirty_thresh, give the other good wb's a pipe | |
c5c6343c WF |
1559 | * to go through, so that tasks on them still remain responsive. |
1560 | * | |
1561 | * In theory 1 page is enough to keep the consumer-producer | |
1562 | * pipe going: the flusher cleans 1 page => the task dirties 1 | |
de1fff37 | 1563 | * more page. However wb_dirty has accounting errors. So use |
93f78d88 | 1564 | * the larger and more IO friendly wb_stat_error. |
c5c6343c | 1565 | */ |
de1fff37 | 1566 | if (wb_dirty <= wb_stat_error(wb)) |
c5c6343c WF |
1567 | break; |
1568 | ||
499d05ec JK |
1569 | if (fatal_signal_pending(current)) |
1570 | break; | |
1da177e4 LT |
1571 | } |
1572 | ||
a88a341a TH |
1573 | if (!dirty_exceeded && wb->dirty_exceeded) |
1574 | wb->dirty_exceeded = 0; | |
1da177e4 LT |
1575 | |
1576 | if (writeback_in_progress(bdi)) | |
5b0830cb | 1577 | return; |
1da177e4 LT |
1578 | |
1579 | /* | |
1580 | * In laptop mode, we wait until hitting the higher threshold before | |
1581 | * starting background writeout, and then write out all the way down | |
1582 | * to the lower threshold. So slow writers cause minimal disk activity. | |
1583 | * | |
1584 | * In normal mode, we start background writeout at the lower | |
1585 | * background_thresh, to keep the amount of dirty memory low. | |
1586 | */ | |
143dfe86 WF |
1587 | if (laptop_mode) |
1588 | return; | |
1589 | ||
1590 | if (nr_reclaimable > background_thresh) | |
c5444198 | 1591 | bdi_start_background_writeback(bdi); |
1da177e4 LT |
1592 | } |
1593 | ||
9d823e8f | 1594 | static DEFINE_PER_CPU(int, bdp_ratelimits); |
245b2e70 | 1595 | |
54848d73 WF |
1596 | /* |
1597 | * Normal tasks are throttled by | |
1598 | * loop { | |
1599 | * dirty tsk->nr_dirtied_pause pages; | |
1600 | * take a nap in balance_dirty_pages(); | |
1601 | * } | |
1602 | * However there is a worst case: if every task exits immediately after dirtying | |
1603 | * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be | |
1604 | * called to throttle the page dirtying. The solution is to save the not yet | |
1605 | * throttled page dirties in dirty_throttle_leaks on task exit and charge them | |
1606 | * randomly into the running tasks. This works well for the above worst case, | |
1607 | * as the new task will pick up and accumulate the old task's leaked dirty | |
1608 | * count and eventually get throttled. | |
1609 | */ | |
1610 | DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; | |
1611 | ||
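/*
 * Illustrative example of the leak hand-off (numbers made up): a task
 * exits having dirtied 20 pages of its 32 page quota, so 20 is parked in
 * this CPU's dirty_throttle_leaks. A later task on the same CPU with
 * nr_dirtied = 5 and ratelimit = 32 picks up min(20, 32 - 5) = 20 of
 * them, its nr_dirtied jumps to 25, and it reaches
 * balance_dirty_pages() correspondingly sooner.
 */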
1da177e4 | 1612 | /** |
d0e1d66b | 1613 | * balance_dirty_pages_ratelimited - balance dirty memory state |
67be2dd1 | 1614 | * @mapping: address_space which was dirtied |
1da177e4 LT |
1615 | * |
1616 | * Processes which are dirtying memory should call in here once for each page | |
1617 | * which was newly dirtied. The function will periodically check the system's | |
1618 | * dirty state and will initiate writeback if needed. | |
1619 | * | |
1620 | * On really big machines, get_writeback_state is expensive, so try to avoid | |
1621 | * calling it too often (ratelimiting). But once we're over the dirty memory | |
1622 | * limit we decrease the ratelimiting by a lot, to prevent individual processes | |
1623 | * from overshooting the limit by (ratelimit_pages) each. | |
1624 | */ | |
d0e1d66b | 1625 | void balance_dirty_pages_ratelimited(struct address_space *mapping) |
1da177e4 | 1626 | { |
dfb8ae56 TH |
1627 | struct inode *inode = mapping->host; |
1628 | struct backing_dev_info *bdi = inode_to_bdi(inode); | |
1629 | struct bdi_writeback *wb = NULL; | |
9d823e8f WF |
1630 | int ratelimit; |
1631 | int *p; | |
1da177e4 | 1632 | |
36715cef WF |
1633 | if (!bdi_cap_account_dirty(bdi)) |
1634 | return; | |
1635 | ||
dfb8ae56 TH |
1636 | if (inode_cgwb_enabled(inode)) |
1637 | wb = wb_get_create_current(bdi, GFP_KERNEL); | |
1638 | if (!wb) | |
1639 | wb = &bdi->wb; | |
1640 | ||
9d823e8f | 1641 | ratelimit = current->nr_dirtied_pause; |
a88a341a | 1642 | if (wb->dirty_exceeded) |
9d823e8f WF |
1643 | ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); |
1644 | ||
9d823e8f | 1645 | preempt_disable(); |
1da177e4 | 1646 | /* |
9d823e8f WF |
1647 | * This prevents one CPU from accumulating too many dirtied pages without | |
1648 | * calling into balance_dirty_pages(), which can happen when there are | |
1649 | * 1000+ tasks that all start dirtying pages at exactly the same | |
1650 | * time and hence all honour a too large initial task->nr_dirtied_pause. | |
1da177e4 | 1651 | */ |
7c8e0181 | 1652 | p = this_cpu_ptr(&bdp_ratelimits); |
9d823e8f | 1653 | if (unlikely(current->nr_dirtied >= ratelimit)) |
fa5a734e | 1654 | *p = 0; |
d3bc1fef WF |
1655 | else if (unlikely(*p >= ratelimit_pages)) { |
1656 | *p = 0; | |
1657 | ratelimit = 0; | |
1da177e4 | 1658 | } |
54848d73 WF |
1659 | /* |
1660 | * Pick up the pages dirtied by exited tasks. This prevents lots of | |
1661 | * short-lived tasks (e.g. gcc invocations in a kernel build) from escaping | |
1662 | * the dirty throttling and livelocking other long-running dirtiers. | |
1663 | */ | |
7c8e0181 | 1664 | p = this_cpu_ptr(&dirty_throttle_leaks); |
54848d73 | 1665 | if (*p > 0 && current->nr_dirtied < ratelimit) { |
d0e1d66b | 1666 | unsigned long nr_pages_dirtied; |
54848d73 WF |
1667 | nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied); |
1668 | *p -= nr_pages_dirtied; | |
1669 | current->nr_dirtied += nr_pages_dirtied; | |
1da177e4 | 1670 | } |
fa5a734e | 1671 | preempt_enable(); |
9d823e8f WF |
1672 | |
1673 | if (unlikely(current->nr_dirtied >= ratelimit)) | |
dfb8ae56 TH |
1674 | balance_dirty_pages(mapping, wb, current->nr_dirtied); |
1675 | ||
1676 | wb_put(wb); | |
1da177e4 | 1677 | } |
d0e1d66b | 1678 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited); |
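/*
 * Typical caller pattern (a sketch only, not a quote of any particular
 * write path): buffered write loops dirty one page at a time and then
 * let this helper decide whether throttling is needed, roughly:
 *
 *	...copy data into the page...
 *	...mark it dirty and unlock it via ->write_end()...
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so the expensive dirty-state checks run about once every
 * nr_dirtied_pause pages rather than on every single page.
 */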
1da177e4 | 1679 | |
232ea4d6 | 1680 | void throttle_vm_writeout(gfp_t gfp_mask) |
1da177e4 | 1681 | { |
364aeb28 DR |
1682 | unsigned long background_thresh; |
1683 | unsigned long dirty_thresh; | |
1da177e4 LT |
1684 | |
1685 | for ( ; ; ) { | |
16c4042f | 1686 | global_dirty_limits(&background_thresh, &dirty_thresh); |
47a13333 | 1687 | dirty_thresh = hard_dirty_limit(dirty_thresh); |
1da177e4 LT |
1688 | |
1689 | /* | |
1690 | * Boost the allowable dirty threshold a bit for page | |
1691 | * allocators so they don't get DoS'ed by heavy writers | |
1692 | */ | |
1693 | dirty_thresh += dirty_thresh / 10; /* wheeee... */ | |
1694 | ||
c24f21bd CL |
1695 | if (global_page_state(NR_UNSTABLE_NFS) + |
1696 | global_page_state(NR_WRITEBACK) <= dirty_thresh) | |
1697 | break; | |
8aa7e847 | 1698 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
369f2389 FW |
1699 | |
1700 | /* | |
1701 | * The caller might hold locks which can prevent IO completion | |
1702 | * or progress in the filesystem. So we cannot just sit here | |
1703 | * waiting for IO to complete. | |
1704 | */ | |
1705 | if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) | |
1706 | break; | |
1da177e4 LT |
1707 | } |
1708 | } | |
1709 | ||
1da177e4 LT |
1710 | /* |
1711 | * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs | |
1712 | */ | |
cccad5b9 | 1713 | int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, |
8d65af78 | 1714 | void __user *buffer, size_t *length, loff_t *ppos) |
1da177e4 | 1715 | { |
8d65af78 | 1716 | proc_dointvec(table, write, buffer, length, ppos); |
1da177e4 LT |
1717 | return 0; |
1718 | } | |
1719 | ||
c2c4986e | 1720 | #ifdef CONFIG_BLOCK |
31373d09 | 1721 | void laptop_mode_timer_fn(unsigned long data) |
1da177e4 | 1722 | { |
31373d09 MG |
1723 | struct request_queue *q = (struct request_queue *)data; |
1724 | int nr_pages = global_page_state(NR_FILE_DIRTY) + | |
1725 | global_page_state(NR_UNSTABLE_NFS); | |
a06fd6b1 TH |
1726 | struct bdi_writeback *wb; |
1727 | struct wb_iter iter; | |
1da177e4 | 1728 | |
31373d09 MG |
1729 | /* |
1730 | * We want to write everything out, not just down to the dirty | |
1731 | * threshold | |
1732 | */ | |
a06fd6b1 TH |
1733 | if (!bdi_has_dirty_io(&q->backing_dev_info)) |
1734 | return; | |
1735 | ||
1736 | bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) | |
1737 | if (wb_has_dirty_io(wb)) | |
1738 | wb_start_writeback(wb, nr_pages, true, | |
1739 | WB_REASON_LAPTOP_TIMER); | |
1da177e4 LT |
1740 | } |
1741 | ||
1742 | /* | |
1743 | * We've spun up the disk and we're in laptop mode: schedule writeback | |
1744 | * of all dirty data a few seconds from now. If the flush is already scheduled | |
1745 | * then push it back - the user is still using the disk. | |
1746 | */ | |
31373d09 | 1747 | void laptop_io_completion(struct backing_dev_info *info) |
1da177e4 | 1748 | { |
31373d09 | 1749 | mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); |
1da177e4 LT |
1750 | } |
1751 | ||
1752 | /* | |
1753 | * We're in laptop mode and we've just synced. The sync's writes will have | |
1754 | * caused another writeback to be scheduled by laptop_io_completion. | |
1755 | * Nothing needs to be written back anymore, so we unschedule the writeback. | |
1756 | */ | |
1757 | void laptop_sync_completion(void) | |
1758 | { | |
31373d09 MG |
1759 | struct backing_dev_info *bdi; |
1760 | ||
1761 | rcu_read_lock(); | |
1762 | ||
1763 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) | |
1764 | del_timer(&bdi->laptop_mode_wb_timer); | |
1765 | ||
1766 | rcu_read_unlock(); | |
1da177e4 | 1767 | } |
c2c4986e | 1768 | #endif |
1da177e4 LT |
1769 | |
1770 | /* | |
1771 | * If ratelimit_pages is too high then we can get into dirty-data overload | |
1772 | * if a large number of processes all perform writes at the same time. | |
1773 | * If it is too low then SMP machines will call the (expensive) | |
1774 | * get_writeback_state too often. | |
1775 | * | |
1776 | * Here we set ratelimit_pages to a level which ensures that when all CPUs are | |
1777 | * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory | |
9d823e8f | 1778 | * thresholds. |
1da177e4 LT |
1779 | */ |
1780 | ||
2d1d43f6 | 1781 | void writeback_set_ratelimit(void) |
1da177e4 | 1782 | { |
9d823e8f WF |
1783 | unsigned long background_thresh; |
1784 | unsigned long dirty_thresh; | |
1785 | global_dirty_limits(&background_thresh, &dirty_thresh); | |
68809c71 | 1786 | global_dirty_limit = dirty_thresh; |
9d823e8f | 1787 | ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); |
1da177e4 LT |
1788 | if (ratelimit_pages < 16) |
1789 | ratelimit_pages = 16; | |
1da177e4 LT |
1790 | } |
1791 | ||
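/*
 * Example (illustrative numbers): with a global dirty_thresh of 400000
 * pages (~1.6GB with 4k pages) on an 8-CPU machine, ratelimit_pages
 * becomes 400000 / (8 * 32) = 1562, so each CPU can accumulate roughly
 * that many dirtied pages before a balance_dirty_pages() call is forced,
 * keeping the worst-case collective overshoot near 1/32 (~3%) of the
 * threshold.
 */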
0db0628d | 1792 | static int |
2f60d628 SB |
1793 | ratelimit_handler(struct notifier_block *self, unsigned long action, |
1794 | void *hcpu) | |
1da177e4 | 1795 | { |
2f60d628 SB |
1796 | |
1797 | switch (action & ~CPU_TASKS_FROZEN) { | |
1798 | case CPU_ONLINE: | |
1799 | case CPU_DEAD: | |
1800 | writeback_set_ratelimit(); | |
1801 | return NOTIFY_OK; | |
1802 | default: | |
1803 | return NOTIFY_DONE; | |
1804 | } | |
1da177e4 LT |
1805 | } |
1806 | ||
0db0628d | 1807 | static struct notifier_block ratelimit_nb = { |
1da177e4 LT |
1808 | .notifier_call = ratelimit_handler, |
1809 | .next = NULL, | |
1810 | }; | |
1811 | ||
1812 | /* | |
dc6e29da LT |
1813 | * Called early on to tune the page writeback dirty limits. |
1814 | * | |
1815 | * We used to scale dirty pages according to how total memory | |
1816 | * related to pages that could be allocated for buffers (by | |
1817 | * comparing nr_free_buffer_pages() to vm_total_pages). | |
1818 | * | |
1819 | * However, that was when we used "dirty_ratio" to scale with | |
1820 | * all memory, and we don't do that any more. "dirty_ratio" | |
1821 | * is now applied to total non-HIGHPAGE memory (by subtracting | |
1822 | * totalhigh_pages from vm_total_pages), and as such we can't | |
1823 | * get into the old insane situation any more where we had | |
1824 | * large amounts of dirty pages compared to a small amount of | |
1825 | * non-HIGHMEM memory. | |
1826 | * | |
1827 | * But we might still want to scale the dirty_ratio by how | |
1828 | * much memory the box has.. | |
1da177e4 LT |
1829 | */ |
1830 | void __init page_writeback_init(void) | |
1831 | { | |
2d1d43f6 | 1832 | writeback_set_ratelimit(); |
1da177e4 | 1833 | register_cpu_notifier(&ratelimit_nb); |
04fbfdc1 | 1834 | |
20ae0079 | 1835 | fprop_global_init(&writeout_completions, GFP_KERNEL); |
1da177e4 LT |
1836 | } |
1837 | ||
f446daae JK |
1838 | /** |
1839 | * tag_pages_for_writeback - tag pages to be written by write_cache_pages | |
1840 | * @mapping: address space structure to write | |
1841 | * @start: starting page index | |
1842 | * @end: ending page index (inclusive) | |
1843 | * | |
1844 | * This function scans the page range from @start to @end (inclusive) and tags | |
1845 | * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is | |
1846 | * that write_cache_pages (or whoever calls this function) will then use | |
1847 | * TOWRITE tag to identify pages eligible for writeback. This mechanism is | |
1848 | * used to avoid livelocking of writeback by a process steadily creating new | |
1849 | * dirty pages in the file (thus it is important for this function to be quick | |
1850 | * so that it can tag pages faster than a dirtying process can create them). | |
1851 | */ | |
1852 | /* | |
1853 | * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. | |
1854 | */ | |
f446daae JK |
1855 | void tag_pages_for_writeback(struct address_space *mapping, |
1856 | pgoff_t start, pgoff_t end) | |
1857 | { | |
3c111a07 | 1858 | #define WRITEBACK_TAG_BATCH 4096 |
f446daae JK |
1859 | unsigned long tagged; |
1860 | ||
1861 | do { | |
1862 | spin_lock_irq(&mapping->tree_lock); | |
1863 | tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, | |
1864 | &start, end, WRITEBACK_TAG_BATCH, | |
1865 | PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); | |
1866 | spin_unlock_irq(&mapping->tree_lock); | |
1867 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); | |
1868 | cond_resched(); | |
d5ed3a4a JK |
1869 | /* We check 'start' to handle wrapping when end == ~0UL */ |
1870 | } while (tagged >= WRITEBACK_TAG_BATCH && start); | |
f446daae JK |
1871 | } |
1872 | EXPORT_SYMBOL(tag_pages_for_writeback); | |
1873 | ||
811d736f | 1874 | /** |
0ea97180 | 1875 | * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. |
811d736f DH |
1876 | * @mapping: address space structure to write |
1877 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | |
0ea97180 MS |
1878 | * @writepage: function called for each page |
1879 | * @data: data passed to writepage function | |
811d736f | 1880 | * |
0ea97180 | 1881 | * If a page is already under I/O, write_cache_pages() skips it, even |
811d736f DH |
1882 | * if it's dirty. This is desirable behaviour for memory-cleaning writeback, |
1883 | * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() | |
1884 | * and msync() need to guarantee that all the data which was dirty at the time | |
1885 | * the call was made get new I/O started against them. If wbc->sync_mode is | |
1886 | * WB_SYNC_ALL then we were called for data integrity and we must wait for | |
1887 | * existing IO to complete. | |
f446daae JK |
1888 | * |
1889 | * To avoid livelocks (when other process dirties new pages), we first tag | |
1890 | * pages which should be written back with TOWRITE tag and only then start | |
1891 | * writing them. For data-integrity sync we have to be careful so that we do | |
1892 | * not miss some pages (e.g., because some other process has cleared TOWRITE | |
1893 | * tag we set). The rule we follow is that TOWRITE tag can be cleared only | |
1894 | * by the process clearing the DIRTY tag (and submitting the page for IO). | |
811d736f | 1895 | */ |
0ea97180 MS |
1896 | int write_cache_pages(struct address_space *mapping, |
1897 | struct writeback_control *wbc, writepage_t writepage, | |
1898 | void *data) | |
811d736f | 1899 | { |
811d736f DH |
1900 | int ret = 0; |
1901 | int done = 0; | |
811d736f DH |
1902 | struct pagevec pvec; |
1903 | int nr_pages; | |
31a12666 | 1904 | pgoff_t uninitialized_var(writeback_index); |
811d736f DH |
1905 | pgoff_t index; |
1906 | pgoff_t end; /* Inclusive */ | |
bd19e012 | 1907 | pgoff_t done_index; |
31a12666 | 1908 | int cycled; |
811d736f | 1909 | int range_whole = 0; |
f446daae | 1910 | int tag; |
811d736f | 1911 | |
811d736f DH |
1912 | pagevec_init(&pvec, 0); |
1913 | if (wbc->range_cyclic) { | |
31a12666 NP |
1914 | writeback_index = mapping->writeback_index; /* prev offset */ |
1915 | index = writeback_index; | |
1916 | if (index == 0) | |
1917 | cycled = 1; | |
1918 | else | |
1919 | cycled = 0; | |
811d736f DH |
1920 | end = -1; |
1921 | } else { | |
1922 | index = wbc->range_start >> PAGE_CACHE_SHIFT; | |
1923 | end = wbc->range_end >> PAGE_CACHE_SHIFT; | |
1924 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | |
1925 | range_whole = 1; | |
31a12666 | 1926 | cycled = 1; /* ignore range_cyclic tests */ |
811d736f | 1927 | } |
6e6938b6 | 1928 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daae JK |
1929 | tag = PAGECACHE_TAG_TOWRITE; |
1930 | else | |
1931 | tag = PAGECACHE_TAG_DIRTY; | |
811d736f | 1932 | retry: |
6e6938b6 | 1933 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daae | 1934 | tag_pages_for_writeback(mapping, index, end); |
bd19e012 | 1935 | done_index = index; |
5a3d5c98 NP |
1936 | while (!done && (index <= end)) { |
1937 | int i; | |
1938 | ||
f446daae | 1939 | nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, |
5a3d5c98 NP |
1940 | min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); |
1941 | if (nr_pages == 0) | |
1942 | break; | |
811d736f | 1943 | |
811d736f DH |
1944 | for (i = 0; i < nr_pages; i++) { |
1945 | struct page *page = pvec.pages[i]; | |
1946 | ||
1947 | /* | |
d5482cdf NP |
1948 | * At this point, the page may be truncated or |
1949 | * invalidated (changing page->mapping to NULL), or | |
1950 | * even swizzled back from swapper_space to tmpfs file | |
1951 | * mapping. However, page->index will not change | |
1952 | * because we have a reference on the page. | |
811d736f | 1953 | */ |
d5482cdf NP |
1954 | if (page->index > end) { |
1955 | /* | |
1956 | * can't be range_cyclic (1st pass) because | |
1957 | * end == -1 in that case. | |
1958 | */ | |
1959 | done = 1; | |
1960 | break; | |
1961 | } | |
1962 | ||
cf15b07c | 1963 | done_index = page->index; |
d5482cdf | 1964 | |
811d736f DH |
1965 | lock_page(page); |
1966 | ||
5a3d5c98 NP |
1967 | /* |
1968 | * Page truncated or invalidated. We can freely skip it | |
1969 | * then, even for data integrity operations: the page | |
1970 | * has disappeared concurrently, so there could be no | |
1971 | * real expectation of this data integrity operation | |
1972 | * even if there is now a new, dirty page at the same | |
1973 | * pagecache address. | |
1974 | */ | |
811d736f | 1975 | if (unlikely(page->mapping != mapping)) { |
5a3d5c98 | 1976 | continue_unlock: |
811d736f DH |
1977 | unlock_page(page); |
1978 | continue; | |
1979 | } | |
1980 | ||
515f4a03 NP |
1981 | if (!PageDirty(page)) { |
1982 | /* someone wrote it for us */ | |
1983 | goto continue_unlock; | |
1984 | } | |
1985 | ||
1986 | if (PageWriteback(page)) { | |
1987 | if (wbc->sync_mode != WB_SYNC_NONE) | |
1988 | wait_on_page_writeback(page); | |
1989 | else | |
1990 | goto continue_unlock; | |
1991 | } | |
811d736f | 1992 | |
515f4a03 NP |
1993 | BUG_ON(PageWriteback(page)); |
1994 | if (!clear_page_dirty_for_io(page)) | |
5a3d5c98 | 1995 | goto continue_unlock; |
811d736f | 1996 | |
de1414a6 | 1997 | trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); |
0ea97180 | 1998 | ret = (*writepage)(page, wbc, data); |
00266770 NP |
1999 | if (unlikely(ret)) { |
2000 | if (ret == AOP_WRITEPAGE_ACTIVATE) { | |
2001 | unlock_page(page); | |
2002 | ret = 0; | |
2003 | } else { | |
2004 | /* | |
2005 | * done_index is set past this page, | |
2006 | * so media errors will not choke | |
2007 | * background writeout for the entire | |
2008 | * file. This has consequences for | |
2009 | * range_cyclic semantics (ie. it may | |
2010 | * not be suitable for data integrity | |
2011 | * writeout). | |
2012 | */ | |
cf15b07c | 2013 | done_index = page->index + 1; |
00266770 NP |
2014 | done = 1; |
2015 | break; | |
2016 | } | |
0b564927 | 2017 | } |
00266770 | 2018 | |
546a1924 DC |
2019 | /* |
2020 | * We stop writing back only if we are not doing | |
2021 | * integrity sync. In case of integrity sync we have to | |
2022 | * keep going until we have written all the pages | |
2023 | * we tagged for writeback prior to entering this loop. | |
2024 | */ | |
2025 | if (--wbc->nr_to_write <= 0 && | |
2026 | wbc->sync_mode == WB_SYNC_NONE) { | |
2027 | done = 1; | |
2028 | break; | |
05fe478d | 2029 | } |
811d736f DH |
2030 | } |
2031 | pagevec_release(&pvec); | |
2032 | cond_resched(); | |
2033 | } | |
3a4c6800 | 2034 | if (!cycled && !done) { |
811d736f | 2035 | /* |
31a12666 | 2036 | * range_cyclic: |
811d736f DH |
2037 | * We hit the last page and there is more work to be done: wrap |
2038 | * back to the start of the file | |
2039 | */ | |
31a12666 | 2040 | cycled = 1; |
811d736f | 2041 | index = 0; |
31a12666 | 2042 | end = writeback_index - 1; |
811d736f DH |
2043 | goto retry; |
2044 | } | |
0b564927 DC |
2045 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) |
2046 | mapping->writeback_index = done_index; | |
06d6cf69 | 2047 | |
811d736f DH |
2048 | return ret; |
2049 | } | |
0ea97180 MS |
2050 | EXPORT_SYMBOL(write_cache_pages); |
2051 | ||
2052 | /* | |
2053 | * Function used by generic_writepages to call the real writepage | |
2054 | * function and set the mapping flags on error | |
2055 | */ | |
2056 | static int __writepage(struct page *page, struct writeback_control *wbc, | |
2057 | void *data) | |
2058 | { | |
2059 | struct address_space *mapping = data; | |
2060 | int ret = mapping->a_ops->writepage(page, wbc); | |
2061 | mapping_set_error(mapping, ret); | |
2062 | return ret; | |
2063 | } | |
2064 | ||
2065 | /** | |
2066 | * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | |
2067 | * @mapping: address space structure to write | |
2068 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | |
2069 | * | |
2070 | * This is a library function, which implements the writepages() | |
2071 | * address_space_operation. | |
2072 | */ | |
2073 | int generic_writepages(struct address_space *mapping, | |
2074 | struct writeback_control *wbc) | |
2075 | { | |
9b6096a6 SL |
2076 | struct blk_plug plug; |
2077 | int ret; | |
2078 | ||
0ea97180 MS |
2079 | /* deal with chardevs and other special file */ |
2080 | if (!mapping->a_ops->writepage) | |
2081 | return 0; | |
2082 | ||
9b6096a6 SL |
2083 | blk_start_plug(&plug); |
2084 | ret = write_cache_pages(mapping, wbc, __writepage, mapping); | |
2085 | blk_finish_plug(&plug); | |
2086 | return ret; | |
0ea97180 | 2087 | } |
811d736f DH |
2088 | |
2089 | EXPORT_SYMBOL(generic_writepages); | |
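/*
 * Sketch of a filesystem-side caller (hypothetical "myfs", mirroring the
 * __writepage()/generic_writepages() pairing above): a ->writepages
 * implementation can reuse write_cache_pages() with its own callback:
 *
 *	static int myfs_writepage_cb(struct page *page,
 *				     struct writeback_control *wbc, void *data)
 *	{
 *		struct address_space *mapping = data;
 *		return mapping->a_ops->writepage(page, wbc);
 *	}
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		return write_cache_pages(mapping, wbc, myfs_writepage_cb,
 *					 mapping);
 *	}
 */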
2090 | ||
1da177e4 LT |
2091 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) |
2092 | { | |
22905f77 AM |
2093 | int ret; |
2094 | ||
1da177e4 LT |
2095 | if (wbc->nr_to_write <= 0) |
2096 | return 0; | |
2097 | if (mapping->a_ops->writepages) | |
d08b3851 | 2098 | ret = mapping->a_ops->writepages(mapping, wbc); |
22905f77 AM |
2099 | else |
2100 | ret = generic_writepages(mapping, wbc); | |
22905f77 | 2101 | return ret; |
1da177e4 LT |
2102 | } |
2103 | ||
2104 | /** | |
2105 | * write_one_page - write out a single page and optionally wait on I/O | |
67be2dd1 MW |
2106 | * @page: the page to write |
2107 | * @wait: if true, wait on writeout | |
1da177e4 LT |
2108 | * |
2109 | * The page must be locked by the caller and will be unlocked upon return. | |
2110 | * | |
2111 | * write_one_page() returns a negative error code if I/O failed. | |
2112 | */ | |
2113 | int write_one_page(struct page *page, int wait) | |
2114 | { | |
2115 | struct address_space *mapping = page->mapping; | |
2116 | int ret = 0; | |
2117 | struct writeback_control wbc = { | |
2118 | .sync_mode = WB_SYNC_ALL, | |
2119 | .nr_to_write = 1, | |
2120 | }; | |
2121 | ||
2122 | BUG_ON(!PageLocked(page)); | |
2123 | ||
2124 | if (wait) | |
2125 | wait_on_page_writeback(page); | |
2126 | ||
2127 | if (clear_page_dirty_for_io(page)) { | |
2128 | page_cache_get(page); | |
2129 | ret = mapping->a_ops->writepage(page, &wbc); | |
2130 | if (ret == 0 && wait) { | |
2131 | wait_on_page_writeback(page); | |
2132 | if (PageError(page)) | |
2133 | ret = -EIO; | |
2134 | } | |
2135 | page_cache_release(page); | |
2136 | } else { | |
2137 | unlock_page(page); | |
2138 | } | |
2139 | return ret; | |
2140 | } | |
2141 | EXPORT_SYMBOL(write_one_page); | |
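/*
 * Usage sketch (hypothetical caller): the page is handed over locked and
 * comes back unlocked; passing wait=1 also waits for the writeout to
 * finish:
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *	if (err)
 *		...handle the I/O error...
 */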
2142 | ||
76719325 KC |
2143 | /* |
2144 | * For address_spaces which do not use buffers nor write back. | |
2145 | */ | |
2146 | int __set_page_dirty_no_writeback(struct page *page) | |
2147 | { | |
2148 | if (!PageDirty(page)) | |
c3f0da63 | 2149 | return !TestSetPageDirty(page); |
76719325 KC |
2150 | return 0; |
2151 | } | |
2152 | ||
e3a7cca1 ES |
2153 | /* |
2154 | * Helper function for set_page_dirty family. | |
c4843a75 GT |
2155 | * |
2156 | * Caller must hold mem_cgroup_begin_page_stat(). | |
2157 | * | |
e3a7cca1 ES |
2158 | * NOTE: This relies on being atomic wrt interrupts. |
2159 | */ | |
c4843a75 GT |
2160 | void account_page_dirtied(struct page *page, struct address_space *mapping, |
2161 | struct mem_cgroup *memcg) | |
e3a7cca1 | 2162 | { |
52ebea74 TH |
2163 | struct inode *inode = mapping->host; |
2164 | ||
9fb0a7da TH |
2165 | trace_writeback_dirty_page(page, mapping); |
2166 | ||
e3a7cca1 | 2167 | if (mapping_cap_account_dirty(mapping)) { |
52ebea74 TH |
2168 | struct bdi_writeback *wb; |
2169 | ||
2170 | inode_attach_wb(inode, page); | |
2171 | wb = inode_to_wb(inode); | |
de1414a6 | 2172 | |
c4843a75 | 2173 | mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); |
e3a7cca1 | 2174 | __inc_zone_page_state(page, NR_FILE_DIRTY); |
ea941f0e | 2175 | __inc_zone_page_state(page, NR_DIRTIED); |
52ebea74 TH |
2176 | __inc_wb_stat(wb, WB_RECLAIMABLE); |
2177 | __inc_wb_stat(wb, WB_DIRTIED); | |
e3a7cca1 | 2178 | task_io_account_write(PAGE_CACHE_SIZE); |
d3bc1fef WF |
2179 | current->nr_dirtied++; |
2180 | this_cpu_inc(bdp_ratelimits); | |
e3a7cca1 ES |
2181 | } |
2182 | } | |
679ceace | 2183 | EXPORT_SYMBOL(account_page_dirtied); |
e3a7cca1 | 2184 | |
b9ea2515 KK |
2185 | /* |
2186 | * Helper function for deaccounting dirty page without writeback. | |
c4843a75 GT |
2187 | * |
2188 | * Caller must hold mem_cgroup_begin_page_stat(). | |
b9ea2515 | 2189 | */ |
c4843a75 GT |
2190 | void account_page_cleaned(struct page *page, struct address_space *mapping, |
2191 | struct mem_cgroup *memcg) | |
b9ea2515 KK |
2192 | { |
2193 | if (mapping_cap_account_dirty(mapping)) { | |
c4843a75 | 2194 | mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); |
b9ea2515 | 2195 | dec_zone_page_state(page, NR_FILE_DIRTY); |
91018134 | 2196 | dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE); |
b9ea2515 KK |
2197 | task_io_account_cancelled_write(PAGE_CACHE_SIZE); |
2198 | } | |
2199 | } | |
b9ea2515 | 2200 | |
1da177e4 LT |
2201 | /* |
2202 | * For address_spaces which do not use buffers. Just tag the page as dirty in | |
2203 | * its radix tree. | |
2204 | * | |
2205 | * This is also used when a single buffer is being dirtied: we want to set the | |
2206 | * page dirty in that case, but not all the buffers. This is a "bottom-up" | |
2207 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | |
2208 | * | |
2d6d7f98 JW |
2209 | * The caller must ensure this doesn't race with truncation. Most will simply |
2210 | * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and | |
2211 | * the pte lock held, which also locks out truncation. | |
1da177e4 LT |
2212 | */ |
2213 | int __set_page_dirty_nobuffers(struct page *page) | |
2214 | { | |
c4843a75 GT |
2215 | struct mem_cgroup *memcg; |
2216 | ||
2217 | memcg = mem_cgroup_begin_page_stat(page); | |
1da177e4 LT |
2218 | if (!TestSetPageDirty(page)) { |
2219 | struct address_space *mapping = page_mapping(page); | |
a85d9df1 | 2220 | unsigned long flags; |
1da177e4 | 2221 | |
c4843a75 GT |
2222 | if (!mapping) { |
2223 | mem_cgroup_end_page_stat(memcg); | |
8c08540f | 2224 | return 1; |
c4843a75 | 2225 | } |
8c08540f | 2226 | |
a85d9df1 | 2227 | spin_lock_irqsave(&mapping->tree_lock, flags); |
2d6d7f98 JW |
2228 | BUG_ON(page_mapping(page) != mapping); |
2229 | WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); | |
c4843a75 | 2230 | account_page_dirtied(page, mapping, memcg); |
2d6d7f98 JW |
2231 | radix_tree_tag_set(&mapping->page_tree, page_index(page), |
2232 | PAGECACHE_TAG_DIRTY); | |
a85d9df1 | 2233 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
c4843a75 GT |
2234 | mem_cgroup_end_page_stat(memcg); |
2235 | ||
8c08540f AM |
2236 | if (mapping->host) { |
2237 | /* !PageAnon && !swapper_space */ | |
2238 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
1da177e4 | 2239 | } |
4741c9fd | 2240 | return 1; |
1da177e4 | 2241 | } |
c4843a75 | 2242 | mem_cgroup_end_page_stat(memcg); |
4741c9fd | 2243 | return 0; |
1da177e4 LT |
2244 | } |
2245 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | |
2246 | ||
2f800fbd WF |
2247 | /* |
2248 | * Call this whenever redirtying a page, to de-account the dirty counters | |
2249 | * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written | |
2250 | * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to | |
2251 | * systematic errors in balanced_dirty_ratelimit and the dirty pages position | |
2252 | * control. | |
2253 | */ | |
2254 | void account_page_redirty(struct page *page) | |
2255 | { | |
2256 | struct address_space *mapping = page->mapping; | |
91018134 | 2257 | |
2f800fbd | 2258 | if (mapping && mapping_cap_account_dirty(mapping)) { |
91018134 TH |
2259 | struct bdi_writeback *wb = inode_to_wb(mapping->host); |
2260 | ||
2f800fbd WF |
2261 | current->nr_dirtied--; |
2262 | dec_zone_page_state(page, NR_DIRTIED); | |
91018134 | 2263 | dec_wb_stat(wb, WB_DIRTIED); |
2f800fbd WF |
2264 | } |
2265 | } | |
2266 | EXPORT_SYMBOL(account_page_redirty); | |
2267 | ||
1da177e4 LT |
2268 | /* |
2269 | * When a writepage implementation decides that it doesn't want to write this | |
2270 | * page for some reason, it should redirty the locked page via | |
2271 | * redirty_page_for_writepage() and it should then unlock the page and return 0 | |
2272 | */ | |
2273 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | |
2274 | { | |
8d38633c KK |
2275 | int ret; |
2276 | ||
1da177e4 | 2277 | wbc->pages_skipped++; |
8d38633c | 2278 | ret = __set_page_dirty_nobuffers(page); |
2f800fbd | 2279 | account_page_redirty(page); |
8d38633c | 2280 | return ret; |
1da177e4 LT |
2281 | } |
2282 | EXPORT_SYMBOL(redirty_page_for_writepage); | |
2283 | ||
2284 | /* | |
6746aff7 WF |
2285 | * Dirty a page. |
2286 | * | |
2287 | * For pages with a mapping this should be done under the page lock | |
2288 | * for the benefit of asynchronous memory error handling, which prefers | |
2289 | * a consistent dirty state. This rule can be broken in some special cases, | |
2290 | * but it is better not to. | |
2291 | * | |
1da177e4 LT |
2292 | * If the mapping doesn't provide a set_page_dirty a_op, then |
2293 | * just fall through and assume that it wants buffer_heads. | |
2294 | */ | |
1cf6e7d8 | 2295 | int set_page_dirty(struct page *page) |
1da177e4 LT |
2296 | { |
2297 | struct address_space *mapping = page_mapping(page); | |
2298 | ||
2299 | if (likely(mapping)) { | |
2300 | int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | |
278df9f4 MK |
2301 | /* |
2302 | * readahead/lru_deactivate_page could leave | |
2303 | * PG_readahead/PG_reclaim set due to a race with end_page_writeback. | |
2304 | * For readahead, if the page is written, the flags will be | |
2305 | * reset. So no problem. | |
2306 | * For lru_deactivate_page, if the page is redirtied, the flag | |
2307 | * will be reset. So no problem. But if the page is then used by readahead, | |
2308 | * it will confuse readahead and make it restart the size ramp-up | |
2309 | * process. That is only a trivial problem, though. | |
2310 | */ | |
a4bb3ecd NH |
2311 | if (PageReclaim(page)) |
2312 | ClearPageReclaim(page); | |
9361401e DH |
2313 | #ifdef CONFIG_BLOCK |
2314 | if (!spd) | |
2315 | spd = __set_page_dirty_buffers; | |
2316 | #endif | |
2317 | return (*spd)(page); | |
1da177e4 | 2318 | } |
4741c9fd AM |
2319 | if (!PageDirty(page)) { |
2320 | if (!TestSetPageDirty(page)) | |
2321 | return 1; | |
2322 | } | |
1da177e4 LT |
2323 | return 0; |
2324 | } | |
2325 | EXPORT_SYMBOL(set_page_dirty); | |
2326 | ||
2327 | /* | |
2328 | * set_page_dirty() is racy if the caller has no reference against | |
2329 | * page->mapping->host, and if the page is unlocked. This is because another | |
2330 | * CPU could truncate the page off the mapping and then free the mapping. | |
2331 | * | |
2332 | * Usually, the page _is_ locked, or the caller is a user-space process which | |
2333 | * holds a reference on the inode by having an open file. | |
2334 | * | |
2335 | * In other cases, the page should be locked before running set_page_dirty(). | |
2336 | */ | |
2337 | int set_page_dirty_lock(struct page *page) | |
2338 | { | |
2339 | int ret; | |
2340 | ||
7eaceacc | 2341 | lock_page(page); |
1da177e4 LT |
2342 | ret = set_page_dirty(page); |
2343 | unlock_page(page); | |
2344 | return ret; | |
2345 | } | |
2346 | EXPORT_SYMBOL(set_page_dirty_lock); | |
2347 | ||
11f81bec TH |
2348 | /* |
2349 | * This cancels just the dirty bit on the kernel page itself, it does NOT | |
2350 | * actually remove dirty bits on any mmap's that may be around. It also | |
2351 | * leaves the page tagged dirty, so any sync activity will still find it on | |
2352 | * the dirty lists, and in particular, clear_page_dirty_for_io() will still | |
2353 | * look at the dirty bits in the VM. | |
2354 | * | |
2355 | * Doing this should *normally* only ever be done when a page is truncated, | |
2356 | * and is not actually mapped anywhere at all. However, fs/buffer.c does | |
2357 | * this when it notices that somebody has cleaned out all the buffers on a | |
2358 | * page without actually doing it through the VM. Can you say "ext3 is | |
2359 | * horribly ugly"? Thought you could. | |
2360 | */ | |
2361 | void cancel_dirty_page(struct page *page) | |
2362 | { | |
c4843a75 GT |
2363 | struct address_space *mapping = page_mapping(page); |
2364 | ||
2365 | if (mapping_cap_account_dirty(mapping)) { | |
2366 | struct mem_cgroup *memcg; | |
2367 | ||
2368 | memcg = mem_cgroup_begin_page_stat(page); | |
2369 | ||
2370 | if (TestClearPageDirty(page)) | |
2371 | account_page_cleaned(page, mapping, memcg); | |
2372 | ||
2373 | mem_cgroup_end_page_stat(memcg); | |
2374 | } else { | |
2375 | ClearPageDirty(page); | |
2376 | } | |
11f81bec TH |
2377 | } |
2378 | EXPORT_SYMBOL(cancel_dirty_page); | |
2379 | ||
1da177e4 LT |
2380 | /* |
2381 | * Clear a page's dirty flag, while caring for dirty memory accounting. | |
2382 | * Returns true if the page was previously dirty. | |
2383 | * | |
2384 | * This is for preparing to put the page under writeout. We leave the page | |
2385 | * tagged as dirty in the radix tree so that a concurrent write-for-sync | |
2386 | * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage | |
2387 | * implementation will run either set_page_writeback() or set_page_dirty(), | |
2388 | * at which stage we bring the page's dirty flag and radix-tree dirty tag | |
2389 | * back into sync. | |
2390 | * | |
2391 | * This incoherency between the page's dirty flag and radix-tree tag is | |
2392 | * unfortunate, but it only exists while the page is locked. | |
2393 | */ | |
2394 | int clear_page_dirty_for_io(struct page *page) | |
2395 | { | |
2396 | struct address_space *mapping = page_mapping(page); | |
c4843a75 GT |
2397 | struct mem_cgroup *memcg; |
2398 | int ret = 0; | |
1da177e4 | 2399 | |
79352894 NP |
2400 | BUG_ON(!PageLocked(page)); |
2401 | ||
7658cc28 LT |
2402 | if (mapping && mapping_cap_account_dirty(mapping)) { |
2403 | /* | |
2404 | * Yes, Virginia, this is indeed insane. | |
2405 | * | |
2406 | * We use this sequence to make sure that | |
2407 | * (a) we account for dirty stats properly | |
2408 | * (b) we tell the low-level filesystem to | |
2409 | * mark the whole page dirty if it was | |
2410 | * dirty in a pagetable. Only to then | |
2411 | * (c) clean the page again and return 1 to | |
2412 | * cause the writeback. | |
2413 | * | |
2414 | * This way we avoid all nasty races with the | |
2415 | * dirty bit in multiple places and clearing | |
2416 | * them concurrently from different threads. | |
2417 | * | |
2418 | * Note! Normally the "set_page_dirty(page)" | |
2419 | * has no effect on the actual dirty bit - since | |
2420 | * that will already usually be set. But we | |
2421 | * need the side effects, and it can help us | |
2422 | * avoid races. | |
2423 | * | |
2424 | * We basically use the page "master dirty bit" | |
2425 | * as a serialization point for all the different | |
2426 | * threads doing their things. | |
7658cc28 LT |
2427 | */ |
2428 | if (page_mkclean(page)) | |
2429 | set_page_dirty(page); | |
79352894 NP |
2430 | /* |
2431 | * We carefully synchronise fault handlers against | |
2432 | * installing a dirty pte and marking the page dirty | |
2d6d7f98 JW |
2433 | * at this point. We do this by having them hold the |
2434 | * page lock while dirtying the page, and pages are | |
2435 | * always locked coming in here, so we get the desired | |
2436 | * exclusion. | |
79352894 | 2437 | */ |
c4843a75 | 2438 | memcg = mem_cgroup_begin_page_stat(page); |
7658cc28 | 2439 | if (TestClearPageDirty(page)) { |
c4843a75 | 2440 | mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); |
8c08540f | 2441 | dec_zone_page_state(page, NR_FILE_DIRTY); |
91018134 | 2442 | dec_wb_stat(inode_to_wb(mapping->host), WB_RECLAIMABLE); |
c4843a75 | 2443 | ret = 1; |
1da177e4 | 2444 | } |
c4843a75 GT |
2445 | mem_cgroup_end_page_stat(memcg); |
2446 | return ret; | |
1da177e4 | 2447 | } |
7658cc28 | 2448 | return TestClearPageDirty(page); |
1da177e4 | 2449 | } |
58bb01a9 | 2450 | EXPORT_SYMBOL(clear_page_dirty_for_io); |
1da177e4 LT |
2451 | |
2452 | int test_clear_page_writeback(struct page *page) | |
2453 | { | |
2454 | struct address_space *mapping = page_mapping(page); | |
d7365e78 | 2455 | struct mem_cgroup *memcg; |
d7365e78 | 2456 | int ret; |
1da177e4 | 2457 | |
6de22619 | 2458 | memcg = mem_cgroup_begin_page_stat(page); |
1da177e4 | 2459 | if (mapping) { |
91018134 TH |
2460 | struct inode *inode = mapping->host; |
2461 | struct backing_dev_info *bdi = inode_to_bdi(inode); | |
1da177e4 LT |
2462 | unsigned long flags; |
2463 | ||
19fd6231 | 2464 | spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4 | 2465 | ret = TestClearPageWriteback(page); |
69cb51d1 | 2466 | if (ret) { |
1da177e4 LT |
2467 | radix_tree_tag_clear(&mapping->page_tree, |
2468 | page_index(page), | |
2469 | PAGECACHE_TAG_WRITEBACK); | |
e4ad08fe | 2470 | if (bdi_cap_account_writeback(bdi)) { |
91018134 TH |
2471 | struct bdi_writeback *wb = inode_to_wb(inode); |
2472 | ||
2473 | __dec_wb_stat(wb, WB_WRITEBACK); | |
2474 | __wb_writeout_inc(wb); | |
04fbfdc1 | 2475 | } |
69cb51d1 | 2476 | } |
19fd6231 | 2477 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4 LT |
2478 | } else { |
2479 | ret = TestClearPageWriteback(page); | |
2480 | } | |
99b12e3d | 2481 | if (ret) { |
d7365e78 | 2482 | mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); |
d688abf5 | 2483 | dec_zone_page_state(page, NR_WRITEBACK); |
99b12e3d WF |
2484 | inc_zone_page_state(page, NR_WRITTEN); |
2485 | } | |
6de22619 | 2486 | mem_cgroup_end_page_stat(memcg); |
1da177e4 LT |
2487 | return ret; |
2488 | } | |
2489 | ||
1c8349a1 | 2490 | int __test_set_page_writeback(struct page *page, bool keep_write) |
1da177e4 LT |
2491 | { |
2492 | struct address_space *mapping = page_mapping(page); | |
d7365e78 | 2493 | struct mem_cgroup *memcg; |
d7365e78 | 2494 | int ret; |
1da177e4 | 2495 | |
6de22619 | 2496 | memcg = mem_cgroup_begin_page_stat(page); |
1da177e4 | 2497 | if (mapping) { |
91018134 TH |
2498 | struct inode *inode = mapping->host; |
2499 | struct backing_dev_info *bdi = inode_to_bdi(inode); | |
1da177e4 LT |
2500 | unsigned long flags; |
2501 | ||
19fd6231 | 2502 | spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4 | 2503 | ret = TestSetPageWriteback(page); |
69cb51d1 | 2504 | if (!ret) { |
1da177e4 LT |
2505 | radix_tree_tag_set(&mapping->page_tree, |
2506 | page_index(page), | |
2507 | PAGECACHE_TAG_WRITEBACK); | |
e4ad08fe | 2508 | if (bdi_cap_account_writeback(bdi)) |
91018134 | 2509 | __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); |
69cb51d1 | 2510 | } |
1da177e4 LT |
2511 | if (!PageDirty(page)) |
2512 | radix_tree_tag_clear(&mapping->page_tree, | |
2513 | page_index(page), | |
2514 | PAGECACHE_TAG_DIRTY); | |
1c8349a1 NJ |
2515 | if (!keep_write) |
2516 | radix_tree_tag_clear(&mapping->page_tree, | |
2517 | page_index(page), | |
2518 | PAGECACHE_TAG_TOWRITE); | |
19fd6231 | 2519 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4 LT |
2520 | } else { |
2521 | ret = TestSetPageWriteback(page); | |
2522 | } | |
3a3c02ec | 2523 | if (!ret) { |
d7365e78 | 2524 | mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); |
3a3c02ec JW |
2525 | inc_zone_page_state(page, NR_WRITEBACK); |
2526 | } | |
6de22619 | 2527 | mem_cgroup_end_page_stat(memcg); |
1da177e4 LT |
2528 | return ret; |
2529 | ||
2530 | } | |
1c8349a1 | 2531 | EXPORT_SYMBOL(__test_set_page_writeback); |
1da177e4 LT |
2532 | |
2533 | /* | |
00128188 | 2534 | * Return true if any of the pages in the mapping are marked with the |
1da177e4 LT |
2535 | * passed tag. |
2536 | */ | |
2537 | int mapping_tagged(struct address_space *mapping, int tag) | |
2538 | { | |
72c47832 | 2539 | return radix_tree_tagged(&mapping->page_tree, tag); |
1da177e4 LT |
2540 | } |
2541 | EXPORT_SYMBOL(mapping_tagged); | |
1d1d1a76 DW |
2542 | |
2543 | /** | |
2544 | * wait_for_stable_page() - wait for writeback to finish, if necessary. | |
2545 | * @page: The page to wait on. | |
2546 | * | |
2547 | * This function determines if the given page is related to a backing device | |
2548 | * that requires page contents to be held stable during writeback. If so, then | |
2549 | * it will wait for any pending writeback to complete. | |
2550 | */ | |
2551 | void wait_for_stable_page(struct page *page) | |
2552 | { | |
de1414a6 CH |
2553 | if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host))) |
2554 | wait_on_page_writeback(page); | |
1d1d1a76 DW |
2555 | } |
2556 | EXPORT_SYMBOL_GPL(wait_for_stable_page); |