/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

struct blkcg;

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
        kref_get(&bdi->refcnt);
        return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
                    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
        return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        /*
         * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
         * any dirty wbs.  See wb_update_write_bandwidth().
         */
        return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
                               enum wb_stat_item item, s64 amount)
{
        percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
        return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
        return nr_cpu_ids * WB_STAT_BATCH;
#else
        return 1;
#endif
}
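
/*
 * Illustrative sketch (not part of the original header), assuming the
 * WB_RECLAIMABLE stat item from <linux/backing-dev-defs.h>: reads of the
 * per-wb counters are approximate because wb_stat_mod() batches updates
 * per CPU, so a result may be off by up to wb_stat_error().  The function
 * name is hypothetical.
 */
static inline bool example_wb_has_reclaimable(struct bdi_writeback *wb)
{
        /* cheap, approximate read; tolerate up to wb_stat_error() of skew */
        return wb_stat(wb, WB_RECLAIMABLE) > 0;
}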

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
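
/*
 * Illustrative sketch (not part of the original header): reserving a share
 * of the global dirty limit for one device, mirroring what the sysfs
 * min_ratio/max_ratio knobs do.  Both values are percentages; the numbers
 * and the function name are hypothetical.
 */
static inline int example_tune_bdi_ratios(struct backing_dev_info *bdi)
{
        int ret;

        ret = bdi_set_min_ratio(bdi, 5);        /* guarantee at least 5% */
        if (ret)
                return ret;
        return bdi_set_max_ratio(bdi, 50);      /* never exceed 50% */
}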

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)
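
/*
 * Illustrative sketch (not part of the original header): the usual
 * allocate/register lifecycle of a backing_dev_info in a driver, using the
 * capability flags above.  Error handling is trimmed, the device name
 * format and function name are hypothetical, and teardown (bdi_unregister()
 * followed by bdi_put()) is left to the caller.
 */
static inline struct backing_dev_info *example_setup_bdi(int node_id)
{
        struct backing_dev_info *bdi;

        bdi = bdi_alloc(node_id);
        if (!bdi)
                return NULL;

        /* a writeback-capable device that accounts its own writeback pages */
        bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;

        if (bdi_register(bdi, "example-%d", 0)) {
                bdi_put(bdi);           /* drop the reference from bdi_alloc() */
                return NULL;
        }
        return bdi;
}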

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
        return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
        return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

static inline int bdi_sched_wait(void *word)
{
        schedule();
        return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
                                    struct cgroup_subsys_state *memcg_css,
                                    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct blkcg *blkcg);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
        struct backing_dev_info *bdi = inode_to_bdi(inode);

        return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
                cgroup_subsys_on_dfl(io_cgrp_subsys) &&
                (bdi->capabilities & BDI_CAP_WRITEBACK) &&
                (inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        struct cgroup_subsys_state *memcg_css;
        struct bdi_writeback *wb;

        memcg_css = task_css(current, memory_cgrp_id);
        if (!memcg_css->parent)
                return &bdi->wb;

        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

        /*
         * %current's blkcg equals the effective blkcg of its memcg.  No
         * need to use the relatively expensive cgroup_get_e_css().
         */
        if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
                return wb;
        return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        struct bdi_writeback *wb;

        rcu_read_lock();
        wb = wb_find_current(bdi);
        if (wb && unlikely(!wb_tryget(wb)))
                wb = NULL;
        rcu_read_unlock();

        if (unlikely(!wb)) {
                struct cgroup_subsys_state *memcg_css;

                memcg_css = task_get_css(current, memory_cgrp_id);
                wb = wb_get_create(bdi, memcg_css, gfp);
                css_put(memcg_css);
        }
        return wb;
}
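
/*
 * Illustrative sketch (not part of the original header): looking up the wb
 * that %current's cgroups map to on a bdi and kicking background writeback
 * on it.  wb_put() pairs with the reference returned here and is declared
 * in <linux/backing-dev-defs.h>; the function name is hypothetical.
 */
static inline void example_kick_current_wb(struct backing_dev_info *bdi)
{
        struct bdi_writeback *wb;

        wb = wb_get_create_current(bdi, GFP_NOFS);
        if (!wb)
                return;

        if (wb_has_dirty_io(wb))
                wb_start_background_writeback(wb);
        wb_put(wb);
}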

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON_ONCE(debug_locks &&
                     (!lockdep_is_held(&inode->i_lock) &&
                      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
                      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
        return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        /*
         * If wbc does not have inode attached, it means cgroup writeback was
         * disabled when wbc started. Just use the default wb in that case.
         */
        return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        rcu_read_lock();

        /*
         * Paired with store_release in inode_switch_wbs_work_fn() and
         * ensures that we see the new wb if we see cleared I_WB_SWITCH.
         */
        cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

        if (unlikely(cookie->locked))
                xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

        /*
         * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
         * lock.  inode_to_wb() will bark.  Deref directly.
         */
        return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
        if (unlikely(cookie->locked))
                xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

        rcu_read_unlock();
}
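
/*
 * Illustrative sketch (not part of the original header): the lockless
 * begin/end transaction pattern for dereferencing an inode's wb, e.g. to
 * bump a per-wb statistic without holding i_lock, the i_pages lock or
 * wb->list_lock.  The code must not sleep between begin and end; the
 * function name is hypothetical.
 */
static inline void example_account_inode_dirtied(struct inode *inode)
{
        struct wb_lock_cookie cookie = {};
        struct bdi_writeback *wb;

        wb = unlocked_inode_to_wb_begin(inode, &cookie);
        inc_wb_stat(wb, WB_DIRTIED);
        unlocked_inode_to_wb_end(inode, &cookie);
}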

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
        return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
        return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
        return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
        return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
        return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
                                struct inode *inode,
                                struct writeback_control *wbc)
{
        return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
        return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
                                            struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct blkcg *blkcg)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */