#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

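/*
 * Illustrative sketch (not code from this file): readers walk bdi_list under
 * RCU while writers modify it under bdi_lock, e.g.
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		...use bdi...
 *	rcu_read_unlock();
 */
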
/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

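/*
 * For reference, a sketch of what BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 * expands to (illustrative, not part of this file):
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *					  struct device_attribute *attr,
 *					  char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 *	static DEVICE_ATTR_RW(read_ahead_kb);
 *
 * DEVICE_ATTR_RW(read_ahead_kb) pairs this _show with the
 * read_ahead_kb_store() defined above to form dev_attr_read_ahead_kb.
 */
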
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

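/*
 * These attributes show up under /sys/class/bdi/<name>/ (for example
 * /sys/class/bdi/8:0/read_ahead_kb for a block device registered through
 * bdi_register_dev()) once bdi_class_init() below has hooked the attribute
 * groups into the class.
 */
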
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

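/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the dirtying fast path in __mark_inode_dirty() queues the inode on
 * a wb and, if that wb had no dirty inodes before, arms the delayed wakeup
 * roughly like
 *
 *	if (wakeup_bdi)
 *		wb_wakeup_delayed(wb);
 *
 * so the flusher runs a writeback interval later instead of immediately.
 */
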
/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))
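/*
 * In pages per second: (100 << 20) bytes/s divided by a (1 << PAGE_SHIFT)
 * byte page is 100 << (20 - PAGE_SHIFT) pages/s, e.g. 25600 pages/s with
 * 4 KiB pages.
 */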

static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain work list and shutdown the delayed_work. !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = container_of(parent, struct bdi_writeback_congested,
					 rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

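/*
 * Within this file, wb_init() is the typical caller: it takes the reference
 * returned above when a wb is set up, and wb_exit() (or wb_init()'s error
 * path) drops it again with wb_congested_put() below.
 */
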
/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &blkio_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb) {
		/* drop the blkcg_css reference taken above */
		ret = -ENOMEM;
		goto out_put;
	}

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfp & __GFP_WAIT);

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &blkio_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

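/*
 * Illustrative sketch (not code from this file): a caller that holds a
 * reference on a memcg css can pick the matching writeback context like
 * this; the surrounding names are assumptions made for the example.
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_NOFS);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */
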
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = mem_cgroup_root_css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	struct bdi_writeback_congested *congested, *congested_n;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);

	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);

	rbtree_postorder_for_each_entry_safe(congested, congested_n,
			&bdi->cgwb_congested_tree, rb_node) {
		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}

	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's and their congested states must be shut down and
	 * released before returning.  Drain the usage counter to wait for
	 * all cgwb's and cgwb_congested's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		kfree(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

#endif	/* CONFIG_CGROUP_WRITEBACK */

int bdi_init(struct backing_dev_info *bdi)
{
	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	init_waitqueue_head(&bdi->wb_waitq);

	return cgwb_bdi_init(bdi);
}
EXPORT_SYMBOL(bdi_init);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	wb_exit(&bdi->wb);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

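/*
 * Illustrative sketch (not code from this file): a filesystem that wants its
 * own bdi typically calls this from its mount/fill_super path; "sbi" and
 * "myfs" are assumptions made for the example.
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs");
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * and pairs it with bdi_destroy(&sbi->bdi) when the superblock is torn down.
 */
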
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

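/*
 * Illustrative sketch (not code from this file): callers that generate dirty
 * pages or reclaim memory typically back off like this when writeback is
 * congested:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * BLK_RW_ASYNC/BLK_RW_SYNC select which congestion_wqh[] queue to sleep on.
 */
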
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(ZONE_CONGESTED, &zone->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
		    table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}
998}