/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

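/*
 * Example (illustrative sketch only, not part of the upstream API): pin a
 * bdi across a sleeping operation.  bdi_get() takes a reference so the
 * structure cannot be released until the matching bdi_put(); the callee
 * below is hypothetical.
 *
 *	struct backing_dev_info *bdi = bdi_get(some_bdi);
 *
 *	do_something_that_may_sleep(bdi);
 *	bdi_put(bdi);
 */
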
__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

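/*
 * Example (illustrative sketch, not upstream code): typical lifecycle of a
 * dynamically allocated bdi.  Error codes and the name format below are
 * hypothetical and error handling is abbreviated.
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	if (bdi_register(bdi, "example%d", 0)) {
 *		bdi_put(bdi);
 *		return -ENODEV;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */
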
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

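/*
 * Example (illustrative): callers pair inc_wb_stat() and dec_wb_stat()
 * around an operation and use wb_stat() for a cheap, approximate read of
 * the per-cpu counter.  The threshold check below is only a sketch.
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *	... submit pages for writeback ...
 *	dec_wb_stat(wb, WB_WRITEBACK);
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) > limit)
 *		... throttle the dirtier ...
 */
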
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

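/*
 * Example (illustrative): because wb_stat() reads the batched per-cpu
 * counters without folding them, comparisons near a threshold should allow
 * for wb_stat_error() worth of slack, e.g.
 *
 *	if (wb_stat(wb, WB_RECLAIMABLE) + wb_stat_error() < thresh)
 *		... clearly below the threshold ...
 */
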
/* BDI ratio is expressed as parts per 1000000 for finer granularity. */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);

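/*
 * Worked example (illustrative): a ratio passed to bdi_set_min_ratio() or
 * bdi_set_max_ratio() is a percentage and is scaled internally, so a
 * min_ratio of 5 becomes 5 * BDI_RATIO_SCALE == 50000 parts per 1000000
 * (i.e. 5%).  The *_no_scale variants take the fine-grained
 * parts-per-1000000 value directly.
 */
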
/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

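/*
 * Example (illustrative, not prescriptive): a freshly allocated bdi enables
 * writeback and automatic accounting by default; a driver that performs its
 * own writeback accounting could express that as
 *
 *	bdi->capabilities = BDI_CAP_WRITEBACK;
 *
 * while BDI_CAP_STRICTLIMIT is normally set via bdi_set_strict_limit().
 */
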
extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

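/*
 * Example (illustrative): dirty accounting is typically gated on
 * mapping_can_writeback(), e.g.
 *
 *	if (mapping_can_writeback(mapping))
 *		inc_wb_stat(inode_to_wb(inode), WB_RECLAIMABLE);
 *
 * so mappings whose bdi lacks BDI_CAP_WRITEBACK never enter the dirty
 * bookkeeping.
 */
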
#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg
 * and iocg have to be on the default hierarchy.  Test whether all conditions
 * are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock(), which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

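/*
 * Example (illustrative): the lookup is only valid under RCU, and the
 * returned wb must not be dereferenced after rcu_read_unlock() unless a
 * reference is taken, e.g.
 *
 *	rcu_read_lock();
 *	wb = wb_find_current(bdi);
 *	if (wb)
 *		... use wb ...
 *	rcu_read_unlock();
 */
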
/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

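/*
 * Example (illustrative): unlike wb_find_current(), the wb returned here is
 * referenced and must be dropped with wb_put() once the caller is done:
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		... queue work against wb ...
 *		wb_put(wb);
 *	}
 */
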
/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started.  Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards
 * and can't sleep during the transaction.  IRQs may or may not be disabled
 * on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

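/*
 * Example (illustrative): the unlocked wb access transaction brackets a
 * short, non-sleeping critical section:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... non-sleeping accesses to wb ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
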
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */